--- os_brick-6.11.0/.coveragerc ---

[run]
branch = True
source = os_brick
omit = os_brick/tests/*

[report]
ignore_errors = True

--- os_brick-6.11.0/.mailmap ---

# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>

--- os_brick-6.11.0/.pylintrc ---

[MASTER]

# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loaded into the active Python interpreter and may
# run arbitrary code.
extension-pkg-whitelist=

# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS,tests,test

# Add files or directories matching the regex patterns to the blacklist. The
# regex matches against base names, not paths.
ignore-patterns=

# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=

# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
# number of processors available to use.
jobs=0

# Control the amount of potential inferred values when inferring a single
# object. This can help the performance when dealing with large functions or
# complex, nested conditions.
limit-inference-results=100

# List of plugins (as comma-separated values of python module names) to load,
# usually to register additional checkers.
load-plugins=

# Pickle collected data for later comparisons.
persistent=yes

# Specify a configuration file.
#rcfile=

# When enabled, pylint would attempt to guess common misconfigurations and
# emit user-friendly hints instead of false-positive error messages.
suggestion-mode=yes

# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no


[MESSAGES CONTROL]

# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED.
confidence=

# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but
# have no Warning level messages displayed, use "--disable=all
# --enable=classes --disable=W".
disable=
# "F" Fatal errors that prevent further processing
 import-error,
# "I" Informational noise
 locally-disabled,
 c-extension-no-member,
# "E" Error for important programming issues (likely bugs)
 access-member-before-definition,
 bad-super-call,
 no-member,
 no-method-argument,
 no-name-in-module,
 no-self-argument,
 no-value-for-parameter,
 unsubscriptable-object,
 method-hidden,
 not-callable,
 keyword-arg-before-vararg,
 too-many-function-args,
 unsupported-assignment-operation,
 not-an-iterable,
 unsupported-membership-test,
 raising-bad-type,
 bad-option-value,
 unexpected-keyword-arg,
 assignment-from-none,
 assignment-from-no-return,
# "W" Warnings for stylistic problems or minor programming issues
 exec-used,
 pointless-statement,
 unnecessary-lambda,
 abstract-method,
 arguments-differ,
 attribute-defined-outside-init,
 bad-builtin,
 bad-indentation,
 broad-except,
 deprecated-lambda,
 expression-not-assigned,
 fixme,
 global-statement,
 global-variable-not-assigned,
 no-init,
 non-parent-init-called,
 protected-access,
 redefined-builtin,
 redefined-outer-name,
 reimported,
 signature-differs,
 star-args,
 super-init-not-called,
 unpacking-non-sequence,
 unused-argument,
 unused-import,
 undefined-loop-variable,
 bad-staticmethod-argument,
 deprecated-method,
 useless-else-on-loop,
 lost-exception,
 pointless-string-statement,
 useless-super-delegation,
 dangerous-default-value,
 wildcard-import,
 eval-used,
 blacklisted-name,
 try-except-raise,
# "C" Coding convention violations
 bad-continuation,
 invalid-name,
 missing-docstring,
 old-style-class,
 superfluous-parens,
 wrong-import-position,
 wrong-import-order,
 ungrouped-imports,
 unused-variable,
 len-as-condition,
 cell-var-from-loop,
 singleton-comparison,
 misplaced-comparison-constant,
 unidiomatic-typecheck,
 consider-using-enumerate,
 bad-whitespace,
 line-too-long,
 useless-super-delegation,
 pointless-string-statement,
 unsupported-membership-test,
 bad-classmethod-argument,
 bad-mcs-classmethod-argument,
# "R" Refactor recommendations
 abstract-class-little-used,
 abstract-class-not-used,
 duplicate-code,
 interface-not-implemented,
 no-self-use,
 too-few-public-methods,
 too-many-ancestors,
 too-many-arguments,
 too-many-branches,
 too-many-instance-attributes,
 too-many-lines,
 too-many-locals,
 too-many-public-methods,
 too-many-return-statements,
 too-many-statements,
 too-many-nested-blocks,
 no-else-return,
 inconsistent-return-statements,
 simplifiable-if-statement,
 too-many-boolean-expressions,
 cyclic-import,
 redefined-argument-from-local,
 consider-using-ternary,
 literal-comparison,
 useless-object-inheritance,
 trailing-comma-tuple,
 consider-using-set-comprehension,
 consider-using-in,
 useless-return,
 chained-comparison

[REPORTS]

# Tells whether to display a full report or only the messages.
reports=no

[BASIC]

# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$

# Argument names can be 2 to 31 characters long, with lowercase and underscores
argument-rgx=[a-z_][a-z0-9_]{1,30}$

# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$

# Module names matching neutron-* are ok (files in bin/)
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$

# Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$

mixin-class-rgx=(^(ManageResource)$|.*[Mm]ixin)

[FORMAT]

# Maximum number of characters on a single line.
max-line-length=79

[VARIABLES]

# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=_

[TYPECHECK]

# List of module names for which member attributes should not be checked
ignored-modules=six.moves,_MovedItems,alembic.context,alembic.op,
                alembic.config,pyxcli,storpool,oslo_privsep.capabilities

signature-mutators=unittest.mock.patch,unittest.mock.patch.object,
                   sqlalchemy.util._preloaded.dependencies

# This is for cinder.objects.*, and requests.packages.*, but due to
# https://github.com/PyCQA/pylint/issues/2498
# it doesn't seem that generated-members can be specified correctly.
# Clean this up later when pylint works correctly.
generated-members=objects,requests

--- os_brick-6.11.0/.stestr.conf ---

[DEFAULT]
test_path=${OS_TEST_PATH:-./os_brick/tests}
top_dir=./

--- os_brick-6.11.0/.zuul.yaml ---

- project:
    templates:
      - check-requirements
      - lib-forward-testing-python3
      - openstack-python3-jobs
      - publish-openstack-docs-pti
      - release-notes-jobs-python3
    check:
      jobs:
        - os-brick-code-coverage:
            voting: false
        - os-brick-src-devstack-plugin-ceph-v
        - os-brick-src-devstack-plugin-ceph-nv
        - os-brick-src-tempest-lvm-lio-barbican
        - os-brick-src-tempest-nfs:
            voting: false
        - os-brick-mypy:
            voting: false
    gate:
      jobs:
        - os-brick-src-tempest-lvm-lio-barbican
    experimental:
      jobs:
        - openstack-tox-pylint

- job:
    name: os-brick-code-coverage
    parent: openstack-tox-cover
    timeout: 2400
    irrelevant-files: &non-code-files
      - ^(test-|)requirements.txt$
      - ^.*\.rst$
      - ^doc/.*$
      - ^releasenotes/.*$
      - ^tools/.*$

- job:
    name: os-brick-src-devstack-plugin-ceph
    abstract: true
    description: |
      Tempest job which tests os-brick from source.
      Former names for this job were:
        * legacy-tempest-dsvm-full-ceph-plugin-src-os-brick
    parent: cinder-plugin-ceph-tempest
    required-projects:
      - opendev.org/openstack/os-brick
    irrelevant-files: &doc-files
      - ^.*\.rst$
      - ^doc/.*$
      - ^releasenotes/.*$

- job:
    name: os-brick-src-devstack-plugin-ceph-v
    final: true
    description: |
      Runs tempest tests on os-brick source against ceph.
      This voting job runs on changes that could affect rbd.
    parent: os-brick-src-devstack-plugin-ceph
    files:
      - ^os_brick/initiator/connector.py$
      - ^os_brick/initiator/connectors/rbd.py$
      - ^os_brick/initiator/linuxrbd.py$
      - ^os_brick/utils.py$

- job:
    name: os-brick-src-devstack-plugin-ceph-nv
    final: true
    voting: false
    description: |
      Runs tempest tests on os-brick source against ceph.
      This non-voting job runs on changes that don't specifically impact
      ceph-related os-brick code.
    parent: os-brick-src-devstack-plugin-ceph
    irrelevant-files:
      - ^.*\.rst$
      - ^doc/.*$
      - ^releasenotes/.*$
      - ^os_brick/initiator/connector.py$
      - ^os_brick/initiator/connectors/rbd.py$
      - ^os_brick/initiator/linuxrbd.py$
      - ^os_brick/utils.py$

- job:
    name: os-brick-src-tempest-lvm-lio-barbican
    parent: cinder-tempest-plugin-lvm-lio-barbican
    description: |
      Specialized cinder-tempest-lvm-lio-barbican job which runs against
      os-brick from sources.
      Former names for this job were:
        * legacy-tempest-dsvm-full-lio-src-os-brick
    required-projects:
      - opendev.org/openstack/os-brick
    irrelevant-files: *doc-files

- job:
    name: os-brick-src-tempest-nfs
    parent: devstack-plugin-nfs-tempest-full
    required-projects:
      - opendev.org/openstack/os-brick
    irrelevant-files: *doc-files

- job:
    name: os-brick-mypy
    parent: openstack-tox
    vars:
      tox_envlist: mypy
    irrelevant-files: *non-code-files

--- os_brick-6.11.0/AUTHORS ---

Ade Lee Alan Bishop Alex Kavanagh Alfredo Moralejo Andreas Jaeger
Andreas Scheuring Angus Lees Anish Bhatt Anthony Lee Arne Recknagel
Arnon Yaari Aviram Bar-Haim Avishay Traeger Bertrand Lallau Biser Milanov
Brian Rosmaita Cao Xuan Hoang ChangBo Guo(gcb) Charles Short Charles Short
Chhavi Agarwal Chris M Chris MacNaughton Christopher Uhler Chuck Short
Corey Bryant Daniel Pawlik David Vallee Delisle Dirk Mueller Dmitry Guryanov
Dmitry Guryanov Doug Hellmann Earle F. Philhower, III Eric Harney Eric Young
Erik Olof Gunnar Andersson Felipe Reyes Flavio Percoco Ghanshyam Mann
Gorka Eguileor Hahyun Hamdy Khader Hemna Hervé Beraud Ivan Kolodyazhny
Ivan Pchelintsev Jack Lu Jay S. Bryant Ji-Wei John Griffith Jon Bernard
Jordan Pittier Jose Porrua Keiichi KII Kendall Nelson Kendall Nelson
Lee Yarwood Liang Fang Lior Friedman LisaLi Liu Qing Lucian Petrut
Luigi Toscano Lukas Bezdicka Luong Anh Tuan Maciej Kucia Mark Goddard
Markus Hentsch Matan Sabag Mathieu Gagné Matt Riedemann Matthew Booth
Michael Price Michal Dulko Michał Dulko Mike Durnosvystov Mike Perez
Monty Taylor Muli Ben-Yehuda Naga Venkata Nate Potter Nilesh Thathagar
Ondřej Nový OpenStack Release Bot Patricia Domingues Patrick East
Pawel Kaminski Peter Penchev Peter Wang Philipp Reisner Rafael Folco
Rahman LBL Rajat Dhasmana Rawan Herzallah Rikimaru Honjo Rui Yuan Dou
Ryan Rossiter Sahid Orentino Ferdjaoui Sam Wan Sean McGinnis Sean McGinnis
Sean McGinnis Sergey Vilgelm Shilpa Jagannath Silvan Kaiser Sofia Enriquez
Sophie Huang Stefan Amann Stephen Finucane Swapnil Kulkarni (coolsvap)
Szczerbik, Przemyslaw Takashi Kajinami Takashi Kajinami Takashi Natsume
Tejdeep Kautharam Thelo Gaultier Thomas Bechtold Tomoki Sekiyama Tony Breeds
Tony Saad Tony Xu Tushar Trambak Gite Van Hung Pham Victor Stinner
Vipin Balachandran Vladislav Belogrudov Vu Cong Tuan Walter A. Boring IV
Walter A. Boring IV Walter A. Boring IV Xiaojun Liao Xing Yang Yandong Xuan
Yian Zong Yingxin Yong Huang Yury Kulazhenkov Yusuke Hayashi Zhao Liqiang
ZhijunWei Zohar Mamedov ankitagrawal caixiaoyu caoyuan cheng cheng li
dengzhaosen digvijay2016 felix23ma fuzihao haobing1 happystacker howardlee
iain MacDonnell imacdonn jacky06 jeremy.zhang jichenjc kangyufei lihaijing
lisali liuyamin melissaml olegnest pengyuesheng qingszhao qiufossen
shenjiatong tony-saad tushargite96 wang yong wanghongxu wangxiyuan weiweigu
whoami-rajat xianming mao xuanyandong yenai yuval brave yuyafei zengjia
zhangboye zhangdaolong zhanghongtao zhangsong zhangyanxian zhangyanxian
zhaoleilc <15247232416@163.com>

--- os_brick-6.11.0/CONTRIBUTING.rst ---

The source repository for this project can be found at:

   https://opendev.org/openstack/os-brick

Pull requests submitted through GitHub are not monitored.
To start contributing to OpenStack, follow the steps in the contribution guide
to set up and use Gerrit:

   https://docs.openstack.org/contributors/code-and-documentation/quick-start.html

Bugs should be filed on Launchpad:

   https://bugs.launchpad.net/os-brick

For more specific information about contributing to this repository, see the
os-brick contributor guide:

   https://docs.openstack.org/os-brick/latest/contributor/contributing.html

--- os_brick-6.11.0/ChangeLog ---

CHANGES
=======

6.11.0
------

* FC: Improve exception handling during scanning
* Fix: UTs in fibre channel multipath tests
* mypy: Improve linuxscsi coverage
* Support mypy 1.15.0
* mypy: Print mypy version used
* FC: Fix multipath wait issue with slow connections
* StorPool: Add the iSCSI configuration API

6.10.0
------

* Bump hacking to 7
* StorPool: Do not use packages \`storpool\` and \`storpool.spopenstack\`
* Fix multipathd dependency in NVMe-oF connections
* Updating testing as per the 2025.1 cycle testing runtime
* reno: Update master for unmaintained/2023.1
* Fix pdf doc build
* Update master for stable/2024.2

6.9.0
-----

* Add get\_passphrase\_from\_secret utility function
* Dell PowerFlex: Added retry after disconnect volume
* add a new funtion to return IP address(es) of a host machine
* Wait for multipath device to be ready for I/O
* StorPool: Raise on spopenstack, SP\_OURID issues
* storpool.py: Use StorPool's API for Attach/Detach
* Fix fast8 tox env

6.8.0
-----

* Support hacking 6.1.0
* reno: Update master for unmaintained/zed
* Update CI for Dalmatian
* Cleanup pylintrc adjustments for six
* Update master for stable/2024.1
* reno: Update master for xena Unmaintained status
* reno: Update master for wallaby Unmaintained status
* reno: Update master for victoria Unmaintained status

6.7.0
-----

* Deprecate Windows OS support
* Add releasenote for unchecked LVM versions
* Fix: FC partial target scan
* reno: Update master for yoga Unmaintained status

6.6.0
-----

* Update python classifier in setup.cfg
* NVMe-oF: Support nvme cli v2
* NVME-oF: Fix to support new "address" in sysfs
* NVMe-oF: Fix attach when reconnecting
* Dell Powerflex: Add new VOLUME\_NOT\_MAPPED\_ERROR
* Silence warning when running in a container with overlayfs
* Fix iSCSI disconnect\_volume when flush fails
* Remove unnecessary 'type: ignore' comments
* mypy: Cleanup "noqa: H301" comments
* NVMe-oF: Improve hostnqn creation
* Dell PowerFlex: Unnecessary login happen

6.5.0
-----

* Revert "Dell PowerFlex password appears in plain text when creating a volume"
* Dell PowerFlex password appears in plain text when creating a volume from an image
* Dell Powerflex: Add new VOLUME\_ALREADY\_MAPPED\_ERROR
* NVMe-oF: Fix generation of hostnqn file
* Update master for stable/2023.2
* Use generic testing template

6.4.0
-----

* Fix unit tests when hostid file exists
* Check paths before extending device
* NVMe-oF: Create /etc/nvme/hostid
* tox: Don't share envdir between pep8 and fast8
* RBD: Improve close and flush in IOWrapper
* SCSI: Support non SAM LUN addressing
* Support hacking>=6.0.0
* LVM: Assume LVM >= 2.02.115
* LVM: Assume version >= 2.02.99
* Fix multipath resize map with friendly names
* Remove egg\_info from setup.cfg
* [docs] add autodoc\_mock\_imports
* nvmeof: Call findmnt with -v
* mypy: Fix failing mypy job
* encryptors: Unbind LuksEncryptor and CryptsetupEncryptor
6.3.0
-----

* Bump mypy to 1.2.0
* Revert "Fix iSCSI disconnect\_volume when flush fails"
* import pylint tooling from cinder
* Support force disconnect for FC
* Fix iSCSI disconnect\_volume when flush fails
* LVM: Remove outdated comment
* Bump mypy to 1.1.1
* LVM: Fix supports\_full\_pool\_create
* Add Python 3.10 to setup.cfg metadata
* Set packages in setup.cfg
* Update master for stable/2023.1
* Update hacking to 5.0

6.2.0
------

* LVM: Fix bare raise on LVM command error
* Update minimum requirements in os-brick
* linuxrbd: Remove rados\_connect\_timeout parameter
* Fix wrong assertion methods
* Bump hacking to 4.1.0
* Support separate privsep logging levels
* Bump bandit to release 1.7.0
* Bump mypy version to 0.982
* Get ready for tox 4
* mypy: Annotate utils.get\_device\_size
* Remove eventlet from requirements
* Handle FileNotFoundError on get\_system\_uuid()
* nit: correct spelling of Rescanning in debug log
* Update metadata in setup.cfg
* Add Python3 antelope unit tests
* Update master for stable/zed
* mypy: lvm.py

6.1.0
------

* mypy: initiator/linuxrbd
* Add mypy-report directory to .gitignore
* RBD: Improve IOWrapper's close & flush methods
* NVMe-oF: Get system uuid in privsep
* mypy: Update format to future \_\_annotations\_\_
* mypy: os\_brick/encryptors/\_\_init\_\_.py
* mypy: set no\_implicit\_optional
* Move mypy job to check queue (non-voting)
* NVMe-oF: read mdstat in Python
* mypy: initiator/connectors/rbd.py
* mypy: privileged/rbd.py
* Bump mypy version to 0.960
* mypy: improve utils.py coverage, update format
* mypy: privileged/nvmeof.py
* mypy: initiator
* mypy: work around mypy bug #13214
* Add flake8-logging-format extension
* Fix nits in nvmeof connector
* Support shared\_targets tristate value
* NVMe-oF: Disconnect subsystems
* NVMe-oF: Consolidate code paths
* LUKS: Support extending host attached volumes

6.0.0
-----

* Support independent file lock path
* Make ceph job voting (sometimes)
* Update README to drop py3.6 reference
* RBD: Fix disconnect\_volume for encrypted volumes
* Read HBA information from sysfs
* Fix Unit Test failures due to host's ANA support
* Add NVMeoF Multipathing support Phase 1 (Native) implementation of NVMeoF Multipathing
* Avoid volume extension errors caused by multipath-tools version
* PEP8: Don't force name in TODO
* Fix encryption symlink issues
* Update python testing as per zed cycle teting runtime
* Bump mypy version to 0.942
* NVMe-oF: Remove misleading exception from logs
* Update master for stable/yoga

5.2.0
-----

* Failure to generate hostnqn in case missing "show-hostnqn" sub-command
* Add "known issues" note to yoga os-brick release
* nvmeof connector check controller already connected
* Update requirements minima for Yoga release
* Updating python testing as per Yoga testing runtime
* Lightos connector - refactor disconnect volume
* Robust md creation/ending
* Reuse get\_host\_nqn code
* nvmeof connector utilize replica\_count
* NVMeOF connector support multipath-enabled kernels
* releasenotes: add Lightbits LightOS connector release note
* Lightbits LightOS driver
* Update python testing classifier
* Prevent CI from running on irrelevant files

5.1.0
-----

* Use file locks in connectors
* Update notes about hacking on os-brick
* multipath/iscsi: iSCSI connections are not reinitiated after reboot
* Fix paths for NVMe devices (non-nguid)
* Fix PowerFlex connector HTTPS certificate validation
* mypy: utils.py
* mypy: exception.py
* Add Python3 yoga unit tests
* Update master for stable/xena

5.0.0
-----
* Add support for multiple volumes within subsystem to NVMe-OF connector
* rbd windows: log the device number
* LVM: Retry lv calls on segfault due to lvs failure
* Replace deprecated tenacity.Retrying.call
* Update mypy infrastructure
* Drop lower-constraints jobs
* linuxscsi: Only raise and log multipathd errors when required
* Remove skip\_missing\_interpreters
* encryptors: Remove workaround for bug #1633518
* NVMe-oF: Return right nqn when missing hostnqn

4.4.0
-----

* Don't log error trace when nvme is not used
* zuul: add mypy experimental job
* trivial: correct comment
* Introduce mypy
* NVMe-oF: Flush on disconnect
* Ussuri+ is python3 only
* multipath/iscsi: remove devices from multipath monitoring
* vmware: Use cookiejar from oslo.vmware client directly
* setup.cfg: Replace dashes with underscores
* Enable bandit runs for tox
* iSCSI: Fix flushing after multipath cfg change
* Add Python3 xena unit tests
* Update master for stable/wallaby

4.3.0
-----

* NVMeOF connector driver connection information compatibility fix
* Update requirements for wallaby release
* Move os-brick to new hacking 4.0.0
* Avoid unhandled exceptions during connecting to iSCSI portals
* Enforce import order with flake8-import-order
* Fix import order of libraries
* Remove six
* Changed minversion in tox to 3.18.0
* Dropping explicit unicode literal
* ScaleIO: More connection info backward compatibility
* Update TOX\_CONSTRAINTS\_FILE

4.2.0
-----

* NVMeOF Connector support MDRAID replication
* connector: Fix comment of get\_connector\_properties regarding enforce\_multipath
* Add fixtures to test-reqs
* opencas: Use BrickException instead of Exception
* Update requirements
* rbd Windows support
* Replace deprecated UPPER\_CONSTRAINTS\_FILE variable
* Improve error handling on target query
* RBD: catch read exceptions prior to modifying offset

4.1.0
-----

* Add timeout default to execute command
* Replace md5 with oslo version
* Revert "Keep barbican integration job running on Bionic"
* FC: Fix not flushing on detach
* Keep barbican integration job running on Bionic
* Update the OSSN-0085 note
* Stop configuring install\_command in tox
* New fix for rbd connector to work with ceph octopus
* Revert "Fix for rbd connector to work with ceph octopus"
* Add Python3 wallaby unit tests
* Update master for stable/victoria

4.0.0
-----

* Fix for rbd connector to work with ceph octopus
* Add volume local cache support to os-brick
* Remove CORAID AOE connector
* Add note about removed cinder feature
* Fix pygments style
* Add code coverage job
* Add release note for scaleio connector upgrade
* Fix a typo in the explanatory note
* Leverage the iSCSI mpath to get the WWN
* bindep: remind people to sync the dependencies with devstack
* ScaleIO: Connection info backward compatibility
* Remove DRBD connector
* Remove Veritas Hyperscale connector
* Remove the HGST connector
* Remove the ITRI DISCO connector
* prepend platform info to by-path string
* RBD: Implement volume extension

3.2.1
-----

* RBD: Fix check\_valid\_device
* RBD: Support non OpenStack usage
* Add TODO to switch to new style rbd commands
* Replace sg\_scan with lsscsi to get '[H:C:T:L]'
* rbd: Warn if ceph udev rules are not configured
* Bump hacking to 3.1.0

3.2.0
-----

* Switch from unittest2 compat methods to Python 3.x methods
* Add doc linting to pep8 target
* rbd: Support 'rbd showmapped' output from ceph 13.2.0+
* Fix os-brick in virtual environments
* Bump pycodestyle to 2.6.0
* Update lower-constraints versions
* Add iscsi-initiator-utils requirement

3.1.0
-----
* Remove VxFlex OS credentials from connection\_properties
* Stop to use the \_\_future\_\_ module
* iSCSI detect multipath DM with no WWN
* Improve WWN detection
* Add dependencies to releasenotes environment
* Switch to newer openstackdocstheme and reno versions
* Fix hacking min version to 3.0.1
* Remove translation sections from setup.cfg
* rbd: Correct local\_attach disconnect test and showmapped arguments
* Switch from retrying to tenacity
* Add oslo.context dependency
* Add oslo.serialization dependency
* Fix import order per hacking guidelines
* Remove Babel requirement
* Bump default tox env from py37 to py38
* Add py38 package metadata
* Use unittest.mock instead of third party mock
* Add Python3 victoria unit tests
* Cleanup py27 support
* Update master for stable/ussuri

3.0.1
-----

* Add release note for ussuri cycle release
* Add NFS tempest job to check queue
* rbd: Use showmapped to find the root RBD device during disconnect\_volume
* doc/source/conf.py is not executable
* Ussuri contrib docs community goal

3.0.0
-----

* connectors/nvme: Wait until nvme device shows up in kernel
* Skip cryptsetup password quality checking
* Drop requirements for unsupported python versions
* Raise hacking version to 2.0.0
* Read mounts from /proc/mounts instead of running mount
* Remove Sheepdog connector
* Port the os-bricks jobs to Zuul v3

2.11.0
------

* StorPool: wait for the device to be resized
* Remove Python 2.7 support from testing and gates
* iscsi: Add \_get\_device\_link retry when waiting for /dev/disk/by-id/ to populate
* StorPool: parse the output of \`blockdev\` correctly
* Fix tox 'bindep' environment
* Split connector list by platform
* FC improve logging
* Update FC connection\_properties examples
* Fix FC scan too broad
* nvmeof: Fix broken UTs
* Remove VxFlexOS connector external dependencies
* Switch to Ussuri jobs
* Add linuxscsi get\_device\_info unit test
* nvmeof: Use subnqn to disconnect a volume
* Update the constraints url
* Require oslo.privsep 1.32.0
* Update master for stable/train
* Blacklist eventlet 0.25.0
* Change PDF file name

2.10.0
------

* encryptors: Introduce support for LUKS2
* Rename nvme to nvmeof
* Add pdf documentation build in tox
* Fix param in s390x platform
* encryptors: Deprecate the CryptsetupEncryptor
* Blacklist sphinx 2.1.0 (autodoc bug)
* Fix bad argument to iscsiadm in iSCSI discovery
* Bump the openstackdocstheme extension to 1.20
* Delete redundant code
* Sync Sphinx requirement

2.9.1
-----

* luks: Explicitly use the luks1 type to ensure LUKS v1 is used
* iSCSI single path: Don't fail if there's no WWN
* Add Python 3 Train unit tests
* Make NFS already mounted message debug level
* Check path alive before get scsi wwn
* linuxscsi: Stop waiting for multipath devices during extend\_volume
* luks: Default to LUKS v1 when formatting volumes
* FC: Ignore some HBAs from map for single WWNN

2.9.0
-----

* Provide setting to ignore lvm descriptor leak warnings
* Ignore pep8 W503/W504
* Replace git.openstack.org URLs with opendev.org URLs
* OpenDev Migration Patch
* Add generate\_connector\_list
* Fix invalid escape sequence warnings
* Update master for stable/stein

2.8.1
-----

* Fix ScaleIO KeyError after upgrade
* Revert "rename ScaleIO connector to VxFlex OS"
* Revert "Fix VxFlexOs KeyError after upgrade"
* Revert "Verify WWN of connected iSCSI devices if passed"
* Remove trailing newline character in UUID
* Fix VxFlexOs KeyError after upgrade
* Remove py35 from setup.cfg

2.8.0
-----

* Drop py35 jobs
* add python 3.7 unit test job
* Fix get keyring content failed when ceph auth disabled
* rename ScaleIO connector to VxFlex OS
* Py3: Fix invalid escape sequencees
* Fix FC case sensitive scanning
* Make sure looping calls are properly mocked
* Add slowest test output to end of test run
* Handle None value 'inititator\_target\_map'
* Don't warn on missing dmidecode
* iSCSI: log exception if portals not found
* VMware: Detach backing vmdk during disconnect
* Verify WWN of connected iSCSI devices if passed
* Add missing params in NoOpEncryptor
* Update hacking version
* Add retry to \`nvme connect\` in nvme connector

2.7.0
-----

* Support RSD scenario of nvme connector
* Remove time checks from test\_custom\_execute\_timeout\_\* tests
* Fix create ceph conf failed when cephx disable
* Tests: Fix PrivRootwrapTestCase failure
* Change openstack-dev to openstack-discuss
* Fix NFS "already mounted" detection
* Windows SMBFS: fix using share subdirs

2.6.2
-----

* removing older python version 3.4 from setup.cfg
* Context manager to handle shared\_targets
* Fix a spelling mistake
* Fix a spelling mistake
* Retry executing command "nvme list" when fail
* Remove unused connection properties
* Improve VolumePathsNotFound message details

2.6.1
-----

* Add LIO barbican tests to .zuul.yaml
* Succeed on iSCSI detach when path just went down
* Remove meanless debug log

2.6.0
-----

* 'iscsiadm -m session' failure handling
* The validation of iscsi session should be case insensitive
* Improve docstrings
* Optimize FC device checking
* Ignore volume disconnect if it is not connected
* Fix spelling mistakes
* Cleanup Zuul config file
* add lib-forward-testing-python3 test job
* add python 3.6 unit test job
* switch documentation job to new PTI
* import zuul job settings from project-config
* Improve detection of multipathd running
* Improve iSCSI device detection speed
* Replace assertRaisesRegexp with assertRaisesRegex
* Add staticmethod decorator in InitiatorConnector
* Modify the verification in RBDConnector
* Fix multipath disconnect with path failure
* Tests: Add unit tests for nfs mount race
* Update reno for stable/rocky

2.5.3
-----

* FC Allow for multipath volumes with different LUNs
* Windows SMBFS: avoid mounting local shares by default
* Remove testrepository
* RemoteFS: don't fail in do\_mount if already mounted
* Add release note link in README

2.5.2
-----

* Handle multiple errors in multipath -l parsing

2.5.1
-----

* fix tox python3 overrides
* Switch to using stestr
* FC fix for scanning only connected HBA's

2.5.0
-----

* Trivial: Update pypi url to new url
* adding sheepdog connector for PPC64
* Fix FC: Only scan connected HBAs
* add lower-constraints job
* Include "nqn." in subsystem name
* add a getter for connector mapping
* uncap eventlet
* Fix bindep for multipath
* Updated from global requirements

2.4.0
-----

* Accept ISCSI\_ERR\_NO\_OBJS\_FOUND from iscsiadm
* Incorporate the connection\_properties input for PPC64
* Adding support to extend attached ScaleIO volumes
* Windows iSCSI: ensure disks are claimed by MPIO
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* Windows FC: fix disk scan issues
* Enable hacking-extensions H204, H205
* Updated from global requirements
* Update reno for stable/queens
* s390x fc: Fix device path for Ubuntu with ds8k

2.3.0
-----

* adding iSER connector for PPC64
* adding VERITAS\_HYPERSCALE connector for PPC64
* Updated from global requirements
* adding VZSTORAGE connector for PPC64
* Updated from global requirements
* Update supported transports for iscsi connector
* Remove the unnecessary pv\_list assign during LVM object init

2.2.0
-----

* Remove requirement on oslo.serialization
* Cleanup test-requirements
* Updated from global requirements
* Windows SMBFS: allow mounting vhd/x images
* Windows: fix connectors 'disconnect\_volume' signature
* Avoid tox\_install.sh for constraints support
* Recover node.startup values after discovering
* Add the StorPool brick connector

2.1.1
-----

* set vg\_thin\_pool\_size to float type
* Fix a typographical error in a release notes entry

2.1.0
-----

* Make close on cryptsetup volumes idempotent
* Make close on luks volumes idempotent
* Remove setting of version/release from releasenotes
* Adding NVMEoF for initiator CLI
* Updated from global requirements
* Fixing FC scanning
* Updated from global requirements
* Always set ignoreskipactivation on snapshot creation

2.0.0
-----

* Enable Python hash randomization for tests
* Remove legacy connector constants
* Add .stestr.conf configuration
* Fix \_remove\_scsi\_symlinks\_no\_links test
* Protect against race within os.path.realpath
* rescan fails for hba missing target wwn
* Updated from global requirements
* Updated from global requirements
* Fix vmware migrate available volume bug
* FC PPC64 device discovery issue
* Add attribute 'name' to class RBDVolume
* Updated from global requirements
* Updated from global requirements
* Fix iSCSI volume attachment over RDMA transport
* doc: Restructure docs for doc-migration
* doc: Remove cruft from conf.py
* doc: Switch from oslosphinx to openstackdocstheme
* Update reno for stable/pike
* Fix ISCSIConnector.\_get\_potential\_volume\_paths logic
* FC connector logs number of attempts incorrectly
* Enable some off-by-default checks
* Update and replace http with https for doc links
* Get the right portal from output of iscsiadm command
* Update and optimize documentation links
* Updated from global requirements
* Add client connect exception unit test for rbd

1.15.1
------

* Don't obscure logs on iSCSI sendtargets failure
* Return WWN in multipath\_id
* Return symlinks for encrypted volumes

1.15.0
------

* Revert "Don't use ignoreskipactivation for thin LVM"
* Don't use ignoreskipactivation for thin LVM
* Fix iSCSI cleanup fix on discovery backends
* Fix manual scan for discovery type backends
* Fix ceph incremental backup fail
* Updated from global requirements

1.14.0
------

* iSCSI multipath: improve logging on connect
* Fix iSCSI cleanup issue when using discovery
* Updated from global requirements
* Add open-iscsi manual scan support
* Refactor iSCSI connect
* Fix slow test\_connect\_volume\_device\_not\_valid test
* Add libssl to bindep
* Refactor iSCSI disconnect
* Updated from global requirements
* Force LUN\_ID to an int

1.13.1
------

* Fix supported connectors for Power platform
* Removed invalid comments in tox.ini [flake8]
* Stop ignoring H904 hacking rule in tox
* Stop ignoring H405 hacking rule in tox
* Stop ignoring E265 pycodestyle rule in tox
* Stop ignoring E123 and E125 pycodestyle rules
* Update hacking version to align with Cinder
* Fixed the veritas connector path
* Change code to be more Pythonic

1.13.0
------

* Return correct device path from Veritas connector
* Prevent rbd map again if it's already mapped
* Check host device alive before multipath id discovery
* Updated from global requirements
* Changed way of providing RBD keyring from keyring\_path to client token
* encryptors: Delay removal of legacy provider names
* Change log level on \_get\_hba\_channel\_scsi\_target
* Veritas os-brick connector should use privsep
* Adding support for FibreChannelConnector for PPC64
* Updated from global requirements

1.12.0
------

* Fixed generated temp file problem for RBD backend
* Replace random uuid with fake uuid in unit tests
* Updated from global requirements
* Mask logging of connection info for iSCSI connector
* Include identity information in rbd commands
* os-brick connector for Veritas HyperScale
* Move vzstorage related code out of RemoteFsClient
* Updated from global requirements
* RBD: consider a custom keyring in connection info
* Add Ocata release notes page
* Using assertIsNone(x) instead of assertEqual(None, x)
* Remove log translations
* Updated from global requirements
* Fix iSCSI multipath rescan
* Retry multipath flush when map is in use
* Fix multipath flush when using friendly names
* Fix unittest run on s390x host

1.11.0
------

* Updated from global requirements
* Encryptors: Fix compat with Nova encryptors for Ocata
* Add Python 3.5 classifier and venv

1.10.0
------

* Fix a wrong indentation
* s390 FC device path fix for Ubuntu

1.9.0
-----

* Updated from global requirements
* encryptors: Introduce encryption provider constants
* Add debug to tox environment
* Windows connectors: add device\_scan\_interval arg
* Add curl to bindep
* Removes unnecessary utf-8 encoding
* Replace assertDictMatch with assertDictEqual
* Add Constraints support and missing bindep.txt dependencies
* Move castellan to test-reqs
* Fix import method to follow community guideline
* Remove the duplicate calls to rescan
* Code cleanup in initiator/linuxfc.py
* Updated from global requirements
* linuxfc: log path when HBA not found
* RBD: ensure temporary config gets deleted
* os-brick: Add bindep support
* Show team and repo badges on README
* Updated from global requirements
* Add developer docs url in README.rst(trivial)
* encryptors: Workaround mangled passphrases
* encryptors: Mock os\_brick.executor correctly
* RBD: enclose ipv6 addresses in square brackets
* Updated from global requirements
* Mask passwords in utils.trace for func params

1.8.0
-----

* Updated from global requirements
* Raise specific exception for an invalid protocol connector
* Updated from global requirements
* Multipath device keeps old size when extending volume
* Updated from global requirements
* Delete deprecated Hacking in tox.ini

1.7.0
-----

* Delete MANIFEST.in in os-brick
* Drop py33 support
* Windows remotefs: create mountpoints at the expected location
* linuxrbd: remove obsolete comment on close()
* Enable release notes translation
* Detect if Fibre Channel support exists
* Close connection to ceph after cinder bakcup
* Updated from global requirements
* Updated from global requirements
* Replace 'assertTrue(a not in b)' with 'assertNotIn(a, b)'
* s390x iscsi support enablement
* Docstrings should not start with a space
* Use assertEqual() instead of assertDictEqual()
* Stop calling multipath -r when attaching/detaching iSCSI volumes
* DISCO: Log init message as debug
* Change warning to info logging for connected volume rescans
* standardize release note page ordering
* Mock time.sleep for test\_lv\_deactivate\_timeout test
* Update reno for stable/newton
* Change assertTrue(isinstance()) with optimal assert
* Remove self.\_\_dict\_\_ for formatting strings
* Create connector aliases to the new connectors refactor
* TrivialFix: Remove logging import unused

1.6.0
-----

* Fix cmd execution stderr, stdout unicode errors
* Mask out passwords when tracing
* RBD: Fix typo in rados timeout assignment
* Fixes with customized ceph cluster name
* Add retries to iSCSI connect\_volume
* Add connector for GPFS volumes
* Add missing %s in print message
* Fix linuxrbd to work with Python 3
* Add tracing unit tests
* Wrong param makes exception message throws inaccurate
* Fix the typo in the file
* Add connector for vmdk volumes
* Fix iSCSI discovery with ISER transport
* RemoteFsClient extend Executor
* Add Windows Fibre Channel connector
* Add Windows SMBFS connector
* Fix FC multipath cleanup
* Fix weak test\_vzstorage\_with\_mds\_list
* Fix the mocking mess
* Fix FC multipath rescan
* Update the home-page info with the developer documentation
* Splitting Out Connectors from connector.py
* Remove race condition from lvextend

1.5.0
-----

* Updated from global requirements
* Mock write and read operations to filesystem
* Local attach feature in RBD connector
* Remove useless info logging in check\_valid\_device
* ScaleIO to get volume name from connection properties
* Add ignore for . directories
* Upgrade tox to 2.0
* Add trace facility
* Fix string interpolation to delayed to be handled by the logging code
* Replace assertEqual(None, \*) with assertIsNone in tests
* Fix wrong path used in iscsi "multipath -l"
* Updated from global requirements
* Remove unused LOG to keep code clean
* Fix multipath iSCSI encrypted volume attach failure
* Updated from global requirements
* release note for windows iSCSI
* Add Windows iSCSI connector
* Make code line length less than 79 characters
* Updated from global requirements
* Replace ip with portal to express more accurately
* Fix argument order for assertEqual to (expected, observed)
* Add fast8 to quickly test pep8 changes
* Make RBDImageMetadata and RBDVolumeIOWrapper re-usable

1.4.0
-----

* Copy encryptors from Nova to os-brick
* Disconnect multipath iscsi may logout session
* Add support for processutils.execute
* Updated from global requirements
* Mock time.sleep in ISCSIConnectorTestCase
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* Ensure that the base connector is platform independent
* Updated from global requirements
* os-brick refactor get\_connector\_properties
* Handle exception case with only target\_portals
* Retire ISERConnector from documentation
* LVM: Create thin pool with 100%FREE
* Fix coverage generation
* Trivial rootwrap -> privsep replacement
* Updated from global requirements
* Updated from global requirements

1.3.0
-----

* LVM: Call supports\_thin\_provisioning as static
* Add pylint tox env
* Don't use oslo-incubator stuff
* Update reno for stable/mitaka
* Replace \_get\_multipath\_device\_name with \_discover\_mpath\_device
* Fixes get\_all\_available\_volumes return value
* Updated from global requirements
* Fix Scality SOFS support
* Actually run the RemoteFSClient unit tests
* Mock time.sleep() in 3 unit tests

1.1.0
-----

* Fix setting the multipath\_id
* Updated from global requirements
* Add sheepdog support
* Include multipath -ll output in failed to parse warning

1.0.0
-----

* Fix iSCSI Multipath
* Add missing release notes
* Lun id's > 255 should be converted to hex
* Updated from global requirements
* Fix output returned from get\_all\_available\_volumes
* Raise exception in find\_multipath\_device
* Updated from global requirements
* Remove multipath -l logic from ISCSI connector
* Add vzstorage protocol for remotefs connections
* Add reno for release notes management
* Fix get\_device\_size with newlines
* Updated from global requirements

0.8.0
-----

* Add connector for ITRI DISCO cinder driver
* os-brick add extend\_volume API
* os-brick add cinder local\_dev lvm code
* Revert "Use assertTrue/False instead of assertEqual(T/F)"
* Fix another unit test failure
* Use assertTrue/False instead of assertEqual(T/F)
* Actually log the command used in \_run\_iscsiadm
* Updated from global requirements
* remove python 2.6 trove classifier

0.7.0
-----

* DRBD connector class
* Updated from global requirements
* Deprecated tox -downloadcache option removed
* ScaleIO could connect wrong volume to VM
* Allow RBDClient to be used from a with-statement
* Updated from global requirements
* Remove brackets from portal
* Minor documentation fixes for the method parameters

0.6.0
-----

* Add requests to project requirements
* Add quobyte protocol for remotefs connections
* Correct a log message
* Brick add param documentation to connectors
* Updated from global requirements
* Multipath Device Action Being Parsed as Name
* Fix iopsLimit parameter in ScaleIO connector
* Parse FCoE sysfs device paths
* Add new Connector APIs for path validation
* Updated from global requirements
* Fix test\_connect\_volume when skip is bypassed
* Fetch and return SCSI WWN
* Update minimum tox version to 1.8
* Updated from global requirements
* Wait for FC multipath devices to become writable
* Check RBDConnector.disconnect\_volume device\_info argument
* Updated from global requirements
* Fix silent iSCSI login failures
* Change os-brick to use ostestr
* Updated from global requirements
* Fix iSCSI multipath cleanup
* Removed use of deprecated LOG.warn
* Fix typo in vgc-cluster command in rootwrap file

0.5.0
-----

* Change ignore-errors to ignore\_errors
* Updated from global requirements
* Add fancy pypi version and download images
* iSCSI fix misleading Log warning on connect fail
* Fix missing value types for log message
* Log a message when can’t find multipath device
* Removed unused dependency: discover
* Use 'device' instead of 'volume\_path'

0.4.0
-----

* Add support for --interface option in iscsiadm
* FC Stop calling multipath command line
* Updated from global requirements
* Add rootwrap filters
* Handle FC LUN IDs greater 255 correctly on s390x architectures
* Fix incorrect comments in FibreChannelConnector
* Adding CHAP discovery logic to os-brick
* Updated from global requirements
* Remove the iSCSI rescan during disconnect
* Remotefs: add ScalityFS support
* Updated from global requirements
* Updated from global requirements
* Change SCSI device removal backoff rate
* Changed connector protocols to use constants
* Updated from global requirements
* Fix race in check and access of /dev/disk/by-path/
* Updated from global requirements

0.3.2
-----

* remotefs: add virtuozzo storage support
* Perform port\_rescan on s390x platforms
* FC discover existing devices for removal

0.3.1
-----

* Use pbr's automatically generated changelog

0.3.0
-----

* Updated from global requirements
* Updated from global requirements
* Update changelog to 0.3.0 being latest
* Fix mock==1.1.0 break unit tests
* Cleanup Python 3 changes
* Prep for 0.2.1 release
* Add connector driver for the ScaleIO cinder driver
* Added ABCMeta class to the InitiatorConnector
* Remove unused oslo incubator files
* update os-brick to pass python3 tests
* Updated from global requirements
* FC Eliminate the need to return devices list
* Switch to oslo.service
* Add RBD connector
* Add HGST Solutions connector
* Support host type specific block volume attachment
* Updated from global requirements
* optimize multipath call to identify IQN
* Updated from global requirements
* Trivial exception parameter name fix for Huawei
* Fix connecting unnecessary iSCSI sessions issue
* Fix disconnecting necessary iSCSI sessions issue
* Add retry to iSCSI delete
* Updated from global requirements
* Add missing connectors to factory test
* Fix local connector test case inheritance

0.2.0
-----

* Allow overriding the host field
* Assign the platform after declaration
* Added a unit test for masking iscsiadm passwords
* Preparing for the 0.1.1 release
* ISCSI be careful parsing iscsiadm output
* Updated from global requirements
* Drop use of 'oslo' namespace package

0.1.0
-----

* Update README to work with release tools
* Brick: Fix race in removing iSCSI device
* Update os-brick requirements
* Mask passwords with iscsiadm commands
* Sync latest \_i18n module for os\_brick
* Use oslo\_log instead of openstack.common.log
* Sync loopingcall from oslo-incubator for os-brick
* Fix wrong command for \_rescan\_multipath
* Fix multipath device discovery when UFN is enabled
* Use six.text\_type instead of unicode
* Fix missing translations for log messages
* Remove error messages from multipath command output before parsing
* Remove mocks after each unit test finished
* Correct project name in .gitreview
* Adjust os-brick to support FCP on System z systems
* Use target\_portals/iqns/luns for alternative target information
* Fix comments style according to Hacking rules
* Update the documentation for os-brick
* Failover to alternative iSCSI portals on login failure
* Remove some unused exceptions from Cinder
* Brick os-brick up to par with cinder brick
* renamed the project to os-brick
* Created the Brick library from Cinder

--- os_brick-6.11.0/HACKING.rst ---

brick Style Commandments
========================

Read the OpenStack Style Commandments
https://docs.openstack.org/hacking/latest/

--- os_brick-6.11.0/LICENSE ---

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
      You may reproduce and distribute copies of the Work or Derivative
      Works thereof in any medium, with or without modifications, and in
      Source or Object form, provided that You meet the following
      conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability.
      In no event and under no legal theory, whether in tort (including
      negligence), contract, or otherwise, unless required by applicable law
      (such as deliberate and grossly negligent acts) or agreed to in
      writing, shall any Contributor be liable to You for damages, including
      any direct, indirect, special, incidental, or consequential damages of
      any character arising as a result of this License or out of the use or
      inability to use the Work (including but not limited to damages for
      loss of goodwill, work stoppage, computer failure or malfunction, or
      any and all other commercial damages or losses), even if such
      Contributor has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

--- os_brick-6.11.0/PKG-INFO ---

Metadata-Version: 2.1
Name: os-brick
Version: 6.11.0
Summary: OpenStack Cinder brick library for managing local volume attaches
Home-page: https://docs.openstack.org/os-brick/
Author: OpenStack
Author-email: openstack-discuss@lists.openstack.org
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Requires-Python: >=3.9
License-File: LICENSE
Requires-Dist: pbr>=5.8.0
Requires-Dist: oslo.concurrency>=5.0.0
Requires-Dist: oslo.config>=9.0.0
Requires-Dist: oslo.context>=4.1.0
Requires-Dist: oslo.log>=4.8.0
Requires-Dist: oslo.i18n>=5.1.0
Requires-Dist: oslo.privsep>=3.0.0
Requires-Dist: oslo.serialization>=4.3.0
Requires-Dist: oslo.service>=2.8.0
Requires-Dist: oslo.utils>=6.0.0
Requires-Dist: requests>=2.25.1
Requires-Dist: tenacity>=6.3.1
Requires-Dist: os-win>=5.7.0
Requires-Dist: psutil>=5.7.2

OpenStack Cinder brick library for managing local volume attaches

--- os_brick-6.11.0/README.rst ---

========================
Team and repository tags
========================

.. image:: https://governance.openstack.org/tc/badges/os-brick.svg
    :target: https://governance.openstack.org/tc/reference/tags/index.html

.. Change things from this point on

=====
brick
=====

.. image:: https://img.shields.io/pypi/v/os-brick.svg
    :target: https://pypi.org/project/os-brick/
    :alt: Latest Version

.. image:: https://img.shields.io/pypi/dm/os-brick.svg
    :target: https://pypi.org/project/os-brick/
    :alt: Downloads

OpenStack Cinder brick library for managing local volume attaches

Features
--------

* Discovery of volumes being attached to a host for many transport protocols.
* Removal of volumes from a host.
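Both features are driven through ``os_brick.initiator.connector``. The
sketch below shows the intended call flow under some stated assumptions:
the connection properties dict normally comes from a storage backend (for
example a Cinder driver's ``initialize_connection()``), and the ``sudo``
root helper and sample IP address are placeholders, not fixed values:

.. code-block:: python

    from os_brick.initiator import connector

    ROOT_HELPER = 'sudo'  # assumption: plain sudo suffices on this host

    # Describe this host (initiator IQN, FC WWPNs, IP, ...).  A storage
    # backend consumes this dict to export a volume to this host.
    props = connector.get_connector_properties(
        ROOT_HELPER, my_ip='192.0.2.10',  # placeholder address
        multipath=False, enforce_multipath=False)

    # Build the connector matching the volume's transport protocol.
    iscsi = connector.InitiatorConnector.factory(
        'ISCSI', ROOT_HELPER, use_multipath=False)

    # connection_properties is the dict returned by the storage backend;
    # connect_volume() discovers the device, disconnect_volume() removes it.
    # device_info = iscsi.connect_volume(connection_properties)
    # ... use device_info['path'] ...
    # iscsi.disconnect_volume(connection_properties, device_info)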
image:: https://img.shields.io/pypi/v/os-brick.svg :target: https://pypi.org/project/os-brick/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/os-brick.svg :target: https://pypi.org/project/os-brick/ :alt: Downloads OpenStack Cinder brick library for managing local volume attaches Features -------- * Discovery of volumes being attached to a host for many transport protocols. * Removal of volumes from a host. Hacking ------- Hacking on brick requires Python 3.9+ (matching the package's ``Requires-Python: >=3.9``). A recent tox is required, as is a recent virtualenv (20.2.2 or newer). For any other information, refer to the developer documents: https://docs.openstack.org/os-brick/latest/ OR refer to the parent project, Cinder: https://docs.openstack.org/cinder/latest/ Release notes for the project can be found at: https://docs.openstack.org/releasenotes/os-brick * License: Apache License, Version 2.0 * Source: https://opendev.org/openstack/os-brick * Bugs: https://bugs.launchpad.net/os-brick ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/bindep.txt0000664000175000017500000000353400000000000015424 0ustar00zuulzuul00000000000000# This is a cross-platform list tracking distribution packages needed for # install and tests # see https://docs.openstack.org/infra/bindep/ for additional information. # WARNING: please make sure any additional os-brick-specific dependency # not already covered by other packages is also installed through devstack. # os-brick installed through pip does not include this file, so # the dependencies must be installed explicitly by changing # the list of packages under files/*/os-brick in the devstack repository. curl device-mapper-multipath [platform:rpm] multipath-tools [platform:dpkg] sg3-utils [platform:dpkg] sg3_utils [platform:rpm] libxml2-devel [platform:rpm] libxml2-dev [platform:dpkg] libxslt-devel [platform:rpm] libxslt1-dev [platform:dpkg] libssl-dev [platform:dpkg] openssl-devel [platform:rpm !platform:suse] libopenssl-devel [platform:suse !platform:rpm] iscsi-initiator-utils [platform:rpm !platform:suse] open-iscsi [platform:suse platform:dpkg] lsscsi # Binary dependencies for PDF doc generation fonts-liberation [doc platform:dpkg] texlive-latex-base [doc platform:dpkg] texlive-latex-extra [doc platform:dpkg] texlive-xetex [doc platform:dpkg] texlive-fonts-recommended [doc platform:dpkg] xindy [doc platform:dpkg] latexmk [doc platform:dpkg] texlive [doc platform:rpm] texlive-fncychap [doc platform:rpm] texlive-titlesec [doc platform:rpm] texlive-tabulary [doc platform:rpm] texlive-framed [doc platform:rpm] texlive-wrapfig [doc platform:rpm] texlive-upquote [doc platform:rpm] texlive-capt-of [doc platform:rpm] texlive-needspace [doc platform:rpm] texlive-polyglossia [doc platform:rpm] texlive-xetex [doc platform:rpm] texlive-xindy [doc platform:rpm] texlive-courier [doc platform:rpm] latexmk [doc platform:rpm] python3-sphinxcontrib-svg2pdfconverter-common [doc platform:rpm] librsvg2-tools [doc platform:rpm] librsvg2-bin [doc platform:dpkg] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1740149696.5957592 os_brick-6.11.0/doc/0000775000175000017500000000000000000000000014162 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/doc/requirements.txt0000664000175000017500000000063300000000000017450 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip
processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. openstackdocstheme>=2.2.7 # Apache-2.0 reno>=3.2.0 # Apache-2.0 sphinx>=3.5.1 # BSD os-api-ref>=2.1.0 # Apache-2.0 sphinxcontrib-apidoc>=0.3.0 # BSD sphinx-feature-classification>=1.1.0 # Apache 2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1740149696.5957592 os_brick-6.11.0/doc/source/0000775000175000017500000000000000000000000015462 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/doc/source/conf.py0000664000175000017500000000606600000000000016771 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'reno.sphinxext', 'openstackdocstheme', ] # The master toctree document. master_doc = 'index' # General information about the project. copyright = '2015, OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'openstackdocs' # -- Options for openstackdocstheme ------------------------------------------- openstackdocs_repo_name = 'openstack/os-brick' openstackdocs_pdf_link = True openstackdocs_bug_project = 'os-brick' openstackdocs_bug_tag = '' # -- Options for autodoc ------------------------------------------------------ autodoc_mock_imports = ['cryptography'] # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). # latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'doc-os-brick.tex', 'OS Brick Documentation', 'Cinder Contributors', 'manual') ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # Additional stuff for the LaTeX preamble. # latex_preamble = '' # Documents to append as an appendix to all manuals. 
# latex_appendices = [] # If false, no module index is generated. # latex_use_modindex = True # Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664 latex_use_xindy = False latex_domain_indices = False latex_elements = { 'makeindex': '', 'printindex': '', 'preamble': r'\setcounter{tocdepth}{3}', } latex_additional_files = [] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1740149696.5957592 os_brick-6.11.0/doc/source/contributor/0000775000175000017500000000000000000000000020034 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/doc/source/contributor/contributing.rst0000664000175000017500000000130200000000000023271 0ustar00zuulzuul00000000000000============================ So You Want to Contribute... ============================ For general information on contributing to OpenStack, please check out the `contributor guide `_ to get started. It covers all the basics that are common to all OpenStack projects: the accounts you need, the basics of interacting with our Gerrit review system, how we communicate as a community, etc. The os-brick library is maintained by the OpenStack Cinder project. To understand our development process and how you can contribute to it, please look at the Cinder project's general contributor's page: http://docs.openstack.org/cinder/latest/contributor/contributing.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/doc/source/index.rst0000664000175000017500000000070400000000000017324 0ustar00zuulzuul00000000000000======== os-brick ======== `os-brick` is a Python package containing classes that help with volume discovery and removal from a host. Installation Guide ------------------ .. toctree:: :maxdepth: 2 install/index Usage Guide ----------- .. toctree:: :maxdepth: 2 user/tutorial Reference --------- .. toctree:: :maxdepth: 2 reference/index Contributing ------------ .. toctree:: :maxdepth: 2 contributor/contributing ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1740149696.5957592 os_brick-6.11.0/doc/source/install/0000775000175000017500000000000000000000000017130 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/doc/source/install/index.rst0000664000175000017500000000057000000000000020773 0ustar00zuulzuul00000000000000============ Installation ============ At the command line: .. code-block:: shell $ pip install os-brick Or, if you have virtualenvwrapper installed: .. code-block:: shell $ mkvirtualenv os-brick $ pip install os-brick Or, from source: .. 
code-block:: shell $ git clone https://opendev.org/openstack/os-brick $ cd os-brick $ python setup.py install ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1740149696.5957592 os_brick-6.11.0/doc/source/reference/0000775000175000017500000000000000000000000017420 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/doc/source/reference/index.rst0000664000175000017500000000035300000000000021262 0ustar00zuulzuul00000000000000API Documentation ================= The **os-brick** package provides the ability to collect host initiator information as well as to discover volumes and remove them from a host. .. toctree:: :maxdepth: 2 os_brick/index ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1740149696.5957592 os_brick-6.11.0/doc/source/reference/os_brick/0000775000175000017500000000000000000000000021213 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/doc/source/reference/os_brick/exception.rst0000664000175000017500000000112300000000000023740 0ustar00zuulzuul00000000000000:mod:`exception` -- Exceptions ============================== .. automodule:: os_brick.exception :synopsis: Exceptions generated by os-brick .. autoclass:: os_brick.exception.BrickException .. autoclass:: os_brick.exception.NotFound .. autoclass:: os_brick.exception.Invalid .. autoclass:: os_brick.exception.InvalidParameterValue .. autoclass:: os_brick.exception.NoFibreChannelHostsFound .. autoclass:: os_brick.exception.NoFibreChannelVolumeDeviceFound .. autoclass:: os_brick.exception.VolumeDeviceNotFound .. autoclass:: os_brick.exception.ProtocolNotSupported ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/doc/source/reference/os_brick/index.rst0000664000175000017500000000034400000000000023055 0ustar00zuulzuul00000000000000:mod:`os_brick` -- OpenStack Brick library ========================================== .. automodule:: os_brick :synopsis: OpenStack Brick library Sub-modules: .. toctree:: :maxdepth: 2 initiator/index exception ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1740149696.5957592 os_brick-6.11.0/doc/source/reference/os_brick/initiator/0000775000175000017500000000000000000000000023215 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/doc/source/reference/os_brick/initiator/connector.rst0000664000175000017500000000211500000000000025740 0ustar00zuulzuul00000000000000:mod:`connector` -- Connector ============================= .. automodule:: os_brick.initiator.connector :synopsis: Connector module for os-brick .. autoclass:: os_brick.initiator.connector.InitiatorConnector .. automethod:: factory .. autoclass:: os_brick.initiator.connector.ISCSIConnector .. automethod:: connect_volume .. automethod:: disconnect_volume .. autoclass:: os_brick.initiator.connector.FibreChannelConnector .. automethod:: connect_volume .. automethod:: disconnect_volume .. autoclass:: os_brick.initiator.connector.LocalConnector .. automethod:: connect_volume .. automethod:: disconnect_volume .. autoclass:: os_brick.initiator.connector.HuaweiStorHyperConnector .. automethod:: connect_volume ..
automethod:: disconnect_volume .. autoclass:: os_brick.initiator.connectors.nvmeof.NVMeOFConnector .. automethod:: connect_volume .. automethod:: disconnect_volume .. automethod:: extend_volume .. automethod:: get_volume_paths .. automethod:: get_connector_properties ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/doc/source/reference/os_brick/initiator/index.rst0000664000175000017500000000027200000000000025057 0ustar00zuulzuul00000000000000:mod:`initiator` -- Initiator ============================= .. automodule:: os_brick.initiator :synopsis: Initiator module Sub-modules: .. toctree:: :maxdepth: 2 connector ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1740149696.5957592 os_brick-6.11.0/doc/source/user/0000775000175000017500000000000000000000000016440 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/doc/source/user/tutorial.rst0000664000175000017500000000435300000000000021042 0ustar00zuulzuul00000000000000======== Tutorial ======== This tutorial is intended as an introduction to working with **os-brick**. Prerequisites ------------- Before we start, make sure that you have the **os-brick** distribution :doc:`installed <install/index>`. In the Python shell, the following should run without raising an exception: .. code-block:: python >>> import os_brick Configuration ------------- There are some os-brick connectors that use file locks to prevent concurrent access to critical sections of the code. These file locks use the ``oslo.concurrency`` ``lock_utils`` module and require the ``lock_path`` to be configured with the path where locks should be created. os-brick can use a specific directory just for its locks or use the same directory as the service using os-brick. The os-brick specific configuration option is ``[os_brick]/lock_path``, and if left undefined it will use the value from ``[oslo_concurrency]/lock_path``. Setup ----- Once os_brick has been loaded it needs to be initialized, which is done by calling the ``os_brick.setup`` method with the ``oslo.config`` configuration. It is important that the call to the ``setup`` method happens **after** oslo.config has been properly initialized. .. code-block:: python import sys import os_brick from oslo_config import cfg from cinder import version CONF = cfg.CONF def main(): CONF(sys.argv[1:], project='cinder', version=version.version_string()) os_brick.setup(CONF) Fetch all of the initiator information from the host ---------------------------------------------------- An example of how to collect the initiator information that is needed to export a volume to this host. .. code-block:: python import os_brick from os_brick.initiator import connector os_brick.setup(CONF) # what helper do you want to use to get root access? root_helper = "sudo" # The ip address of the host you are running on my_ip = "192.168.1.1" # Do you want to support multipath connections? multipath = True # Do you want to enforce that multipath daemon is running?
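    # (A behavioral hint: when enforcement is enabled and multipath support
    # is requested while the multipath daemon is not running, os-brick
    # raises an exception instead of quietly falling back to a single-path
    # connection.)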
enforce_multipath = False initiator = connector.get_connector_properties(root_helper, my_ip, multipath, enforce_multipath) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.583759 os_brick-6.11.0/etc/0000775000175000017500000000000000000000000014170 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.583759 os_brick-6.11.0/etc/os-brick/0000775000175000017500000000000000000000000015701 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.599759 os_brick-6.11.0/etc/os-brick/rootwrap.d/0000775000175000017500000000000000000000000020000 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/etc/os-brick/rootwrap.d/os-brick.filters0000664000175000017500000000072700000000000023111 0ustar00zuulzuul00000000000000# os-brick command filters # This file should be owned by (and only-writeable by) the root user [Filters] # privileged/__init__.py: priv_context.PrivContext(default) # This line ties the superuser privs with the config files, context name, # and (implicitly) the actual python code invoked. privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.* ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/mypy-files.txt0000664000175000017500000000144500000000000016260 0ustar00zuulzuul00000000000000os_brick/encryptors/__init__.py os_brick/encryptors/luks.py os_brick/exception.py os_brick/executor.py os_brick/i18n.py os_brick/utils.py os_brick/initiator/__init__.py os_brick/initiator/linuxscsi.py os_brick/initiator/connectors/base.py os_brick/initiator/connectors/base_iscsi.py os_brick/initiator/connectors/base_rbd.py os_brick/initiator/connectors/fibre_channel.py os_brick/initiator/connectors/iscsi.py os_brick/initiator/connectors/nvmeof.py os_brick/initiator/connectors/local.py os_brick/initiator/connectors/rbd.py os_brick/initiator/connectors/remotefs.py os_brick/initiator/host_driver.py os_brick/initiator/linuxfc.py os_brick/initiator/linuxrbd.py os_brick/initiator/utils.py os_brick/local_dev/lvm.py os_brick/privileged/nvmeof.py os_brick/privileged/rbd.py os_brick/remotefs/remotefs.py ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.599759 os_brick-6.11.0/os_brick/0000775000175000017500000000000000000000000015210 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/__init__.py0000664000175000017500000000213200000000000017317 0ustar00zuulzuul00000000000000# Copyright (c) 2022, Red Hat, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from os_brick import opts LOG = logging.getLogger(__name__) def setup(conf, **kwargs): """Setup the os-brick library. Service configuration options must have been initialized before this call because oslo's lock_path doesn't have a value before that. Having kwargs allows us to receive parameters in the future. """ if kwargs: LOG.warning('Ignoring arguments %s', kwargs.keys()) opts.set_defaults(conf) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.603759 os_brick-6.11.0/os_brick/caches/0000775000175000017500000000000000000000000016436 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/caches/__init__.py0000664000175000017500000000646100000000000020556 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import abc from oslo_log import log as logging from oslo_utils import importutils from os_brick import exception from os_brick.i18n import _ LOG = logging.getLogger(__name__) CACHE_ENGINE_TO_CACHE_CLASS_MAP = { "opencas": 'os_brick.caches.opencas.OpenCASEngine', } class CacheEngineBase(object, metaclass=abc.ABCMeta): def __init__(self, **kwargs): self._root_helper = kwargs.get('root_helper') @abc.abstractmethod def is_engine_ready(self, **kwargs): return @abc.abstractmethod def attach_volume(self, **kwargs): return @abc.abstractmethod def detach_volume(self, **kwargs): return class CacheManager(): """Cache manager for volumes. This CacheManager uses cache engines to do volume cache. 
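    A minimal usage sketch (illustrative only; it assumes an opencas cache
    engine with cache id 1 is already configured on the host, and that
    ``connection_info['data']`` carries ``device_path`` and ``cacheable``)::

        mgr = CacheManager(root_helper, connection_info,
                           cache_name='opencas', opencas_cache_id=1)
        cached_dev = mgr.attach_volume()  # path of the emulated, cached disk
        # ... use the cached device ...
        orig_dev = mgr.detach_volume()    # tear down, back to original path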
""" def __init__(self, root_helper, connection_info, *args, **kwargs): data = connection_info['data'] if not data.get('device_path'): volume_id = data.get('volume_id') or connection_info.get('serial') raise exception.VolumeLocalCacheNotSupported( volume_id=volume_id, volume_type=connection_info.get('driver_volume_type')) self.ori_device_path = data.get('device_path') if not data.get('cacheable'): self.cacheable = False return self.cacheable = True self.root_helper = root_helper self.engine_name = kwargs.get('cache_name') self.args = args self.kwargs = kwargs self.kwargs["root_helper"] = root_helper self.kwargs["dev_path"] = data.get('device_path') self.engine = self._get_engine(self.engine_name, **self.kwargs) def _get_engine(self, engine_name, **kwargs): eng_cls_path = CACHE_ENGINE_TO_CACHE_CLASS_MAP.get(engine_name) if eng_cls_path: engine_cls = importutils.import_class(eng_cls_path) eng = engine_cls(**kwargs) if eng.is_engine_ready(): return eng raise exception.Invalid(_("No valid cache engine")) def attach_volume(self): """setup the cache when attaching volume.""" if not self.cacheable: return self.ori_device_path LOG.debug("volume before cached: %s", self.kwargs.get('dev_path')) emulated_disk = self.engine.attach_volume(**self.kwargs) LOG.debug("volume after cached: %s", emulated_disk) return emulated_disk def detach_volume(self): """Release the cache on detaching volume.""" if not self.cacheable: return self.ori_device_path LOG.debug("volume before detach: %s", self.kwargs.get('dev_path')) ori_disk = self.engine.detach_volume(**self.kwargs) LOG.debug("volume after detach: %s", ori_disk) return ori_disk ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/caches/opencas.py0000664000175000017500000001014500000000000020441 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from oslo_concurrency import processutils as putils from oslo_log import log as logging from os_brick import caches from os_brick import exception from os_brick import executor LOG = logging.getLogger(__name__) class OpenCASEngine(executor.Executor, caches.CacheEngineBase): def __init__(self, **kwargs): super(OpenCASEngine, self).__init__(**kwargs) self.cache_id = kwargs.get('opencas_cache_id') def os_execute(self, *cmd, **kwargs): LOG.debug('os_execute: cmd: %s, args: %s', cmd, kwargs) try: out, err = self._execute(*cmd, **kwargs) except putils.ProcessExecutionError as err: LOG.exception('os_execute error') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) raise return out, err def is_engine_ready(self, **kwargs): """'casadm -L' will print like: type id disk status write policy device cache 1 /dev/nvme0n1 Running wt - """ cmd = ['casadm', '-L'] kwargs = dict(run_as_root=True, root_helper=self._root_helper) out, err = self.os_execute(*cmd, **kwargs) for line in out.splitlines(): fields = line.split() if str(self.cache_id) == fields[1] and 'Running' == fields[3]: return True return False def attach_volume(self, **kwargs): core = kwargs.get('dev_path') if core is None: LOG.error('dev_path is not specified') raise exception.VolumePathsNotFound() core = os.path.realpath(core) return self._map_casdisk(core) def detach_volume(self, **kwargs): casdev = kwargs.get('dev_path') if casdev is None: LOG.error('dev_path is not specified') raise exception.VolumePathsNotFound() coreid, coredev = self._get_mapped_coredev(casdev) LOG.info("opencas: coreid=%s,coredev=%s", coreid, coredev) self._unmap_casdisk(coreid) return coredev def _get_mapped_casdev(self, core): cmd = ['casadm', '-L'] kwargs = dict(run_as_root=True, root_helper=self._root_helper) out, err = self.os_execute(*cmd, **kwargs) for line in out.splitlines(): if line.find(core) < 0: continue fields = line.split() return fields[5] raise exception.BrickException('Cannot find emulated device.') def _get_mapped_coredev(self, casdev): cmd = ['casadm', '-L'] kwargs = dict(run_as_root=True, root_helper=self._root_helper) out, err = self.os_execute(*cmd, **kwargs) for line in out.splitlines(): if line.find(casdev) < 0: continue fields = line.split() return (fields[1], fields[2]) raise exception.BrickException('Cannot find core device.') def _map_casdisk(self, core): cmd = ['casadm', '-A', '-i', self.cache_id, '-d', core] kwargs = dict(run_as_root=True, root_helper=self._root_helper) out, err = self.os_execute(*cmd, **kwargs) return self._get_mapped_casdev(core) def _unmap_casdisk(self, coreid): cmd = ['casadm', '-R', '-f', '-i', self.cache_id, '-j', coreid] kwargs = dict(run_as_root=True, root_helper=self._root_helper) out, err = self.os_execute(*cmd, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/constants.py0000664000175000017500000000214600000000000017601 0ustar00zuulzuul00000000000000# Copyright (c) 2023, Red Hat, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # Valid SCSI addressing values for 'addressing_mode' in connection info. # More information in os_brick.initiator.linuxscsi.LinuxSCSI.lun_for_addressing SCSI_ADDRESSING_TRANSPARENT = 'transparent' SCSI_ADDRESSING_SAM = 'SAM' SCSI_ADDRESSING_SAM2 = 'SAM2' SCSI_ADDRESSING_SAM3_FLAT = 'SAM3-flat' SCSI_ADDRESSING_MODES = (SCSI_ADDRESSING_TRANSPARENT, SCSI_ADDRESSING_SAM, SCSI_ADDRESSING_SAM2, SCSI_ADDRESSING_SAM3_FLAT) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.603759 os_brick-6.11.0/os_brick/encryptors/0000775000175000017500000000000000000000000017420 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/encryptors/__init__.py0000664000175000017500000001255700000000000021537 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import annotations from typing import Any from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import strutils from os_brick.encryptors import base from os_brick.encryptors import nop LOG = logging.getLogger(__name__) LUKS = "luks" LUKS2 = "luks2" PLAIN = "plain" FORMAT_TO_FRONTEND_ENCRYPTOR_MAP = { LUKS: 'os_brick.encryptors.luks.LuksEncryptor', LUKS2: 'os_brick.encryptors.luks.Luks2Encryptor', PLAIN: 'os_brick.encryptors.cryptsetup.CryptsetupEncryptor' } LEGACY_PROVIDER_CLASS_TO_FORMAT_MAP = { "nova.volume.encryptors.luks.LuksEncryptor": LUKS, "nova.volume.encryptors.cryptsetup.CryptsetupEncryptor": PLAIN, "nova.volume.encryptors.nop.NoopEncryptor": None, "os_brick.encryptors.luks.LuksEncryptor": LUKS, "os_brick.encryptors.cryptsetup.CryptsetupEncryptor": PLAIN, "os_brick.encryptors.nop.NoopEncryptor": None, "LuksEncryptor": LUKS, "CryptsetupEncryptor": PLAIN, "NoOpEncryptor": None, } def get_volume_encryptor(root_helper: str, connection_info: dict[str, Any], keymgr, execute=None, *args, **kwargs) -> base.VolumeEncryptor: """Creates a VolumeEncryptor used to encrypt the specified volume. :param connection_info: the connection information used to attach the volume :returns VolumeEncryptor: the VolumeEncryptor for the volume """ encryptor = nop.NoOpEncryptor(root_helper=root_helper, connection_info=connection_info, keymgr=keymgr, execute=execute, *args, **kwargs) location = kwargs.get('control_location', None) if location and location.lower() == 'front-end': # case insensitive provider = kwargs.get('provider') # TODO(lyarwood): Remove the following in Queens and raise an # ERROR if provider is not a key in SUPPORTED_ENCRYPTION_PROVIDERS. # Until then continue to allow both the class name and path to be used.
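        # Illustrative walk-through of the resolution below: a legacy
        # provider value such as 'LuksEncryptor' is first normalized to the
        # 'luks' format, which in turn maps to
        # 'os_brick.encryptors.luks.LuksEncryptor' before the class is
        # imported and instantiated.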
if provider in LEGACY_PROVIDER_CLASS_TO_FORMAT_MAP: LOG.warning("Use of the in tree encryptor class %(provider)s" " by directly referencing the implementation class" " will be blocked in the Queens release of" " os-brick.", {'provider': provider}) provider = LEGACY_PROVIDER_CLASS_TO_FORMAT_MAP[provider] if provider in FORMAT_TO_FRONTEND_ENCRYPTOR_MAP: provider = FORMAT_TO_FRONTEND_ENCRYPTOR_MAP[provider] elif provider is None: provider = "os_brick.encryptors.nop.NoOpEncryptor" else: LOG.warning("Use of the out of tree encryptor class " "%(provider)s will be blocked with the Queens " "release of os-brick.", {'provider': provider}) try: encryptor = importutils.import_object( provider, root_helper, connection_info, keymgr, execute, **kwargs) except Exception as e: LOG.error("Error instantiating %(provider)s: %(exception)s", {'provider': provider, 'exception': e}) raise msg = ("Using volume encryptor '%(encryptor)s' for connection: " "%(connection_info)s" % {'encryptor': encryptor, 'connection_info': connection_info}) LOG.debug(strutils.mask_password(msg)) return encryptor def get_encryption_metadata(context, volume_api, volume_id: str, connection_info: dict[str, Any]) -> dict[str, Any]: metadata = {} if ('data' in connection_info and connection_info['data'].get('encrypted', False)): try: metadata = volume_api.get_volume_encryption_metadata(context, volume_id) if not metadata: LOG.warning('Volume %s should be encrypted but there is no ' 'encryption metadata.', volume_id) except Exception as e: LOG.error("Failed to retrieve encryption metadata for " "volume %(volume_id)s: %(exception)s", {'volume_id': volume_id, 'exception': e}) raise if metadata: msg = ("Using volume encryption metadata '%(metadata)s' for " "connection: %(connection_info)s" % {'metadata': metadata, 'connection_info': connection_info}) LOG.debug(strutils.mask_password(msg)) return metadata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/encryptors/base.py0000664000175000017500000000470600000000000020713 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from os_brick import executor from os_brick import utils class VolumeEncryptor(executor.Executor, metaclass=abc.ABCMeta): """Base class to support encrypted volumes. A VolumeEncryptor provides hooks for attaching and detaching volumes, which are called immediately prior to attaching the volume to an instance and immediately following detaching the volume from an instance. This class performs no actions for either hook. """ def __init__(self, root_helper, connection_info, keymgr, execute=None, *args, **kwargs): super(VolumeEncryptor, self).__init__(root_helper, execute=execute, *args, **kwargs) self._key_manager = keymgr self.encryption_key_id = kwargs.get('encryption_key_id') def _get_key(self, context): """Retrieves the encryption key for the specified volume. 
:param context: the context used to access the encryption key """ return self._key_manager.get(context, self.encryption_key_id) def _get_encryption_key_as_passphrase(self, context): key = self._get_key(context) return utils.get_passphrase_from_secret(key) @abc.abstractmethod def attach_volume(self, context, **kwargs): """Hook called immediately prior to attaching a volume to an instance. """ pass @abc.abstractmethod def detach_volume(self, **kwargs): """Hook called immediately after detaching a volume from an instance. """ pass @abc.abstractmethod def extend_volume(self, context, **kwargs): """Extend an encrypted volume and return the decrypted volume size.""" pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/encryptors/cryptsetup.py0000664000175000017500000001627300000000000022225 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_concurrency import processutils from oslo_log import log as logging from oslo_log import versionutils from os_brick.encryptors import base from os_brick import exception LOG = logging.getLogger(__name__) class CryptsetupEncryptor(base.VolumeEncryptor): """A VolumeEncryptor based on dm-crypt. This VolumeEncryptor uses dm-crypt to encrypt the specified volume. """ def __init__(self, root_helper, connection_info, keymgr, execute=None, *args, **kwargs): super(CryptsetupEncryptor, self).__init__( root_helper=root_helper, connection_info=connection_info, keymgr=keymgr, execute=execute, *args, **kwargs) # Fail if no device_path was set when connecting the volume, e.g. in # the case of libvirt network volume drivers. data = connection_info['data'] if not data.get('device_path'): volume_id = data.get('volume_id') or connection_info.get('serial') raise exception.VolumeEncryptionNotSupported( volume_id=volume_id, volume_type=connection_info['driver_volume_type']) # the device's path as given to libvirt -- e.g., /dev/disk/by-path/... self.symlink_path = connection_info['data']['device_path'] # a unique name for the volume -- e.g., the iSCSI participant name self.dev_name = 'crypt-%s' % os.path.basename(self.symlink_path) # NOTE(lixiaoy1): This is to import fix for 1439869 from Nova. # NOTE(tsekiyama): In older version of nova, dev_name was the same # as the symlink name. Now it has 'crypt-' prefix to avoid conflict # with multipath device symlink. To enable rolling update, we use the # old name when the encrypted volume already exists. old_dev_name = os.path.basename(self.symlink_path) wwn = data.get('multipath_id') if self._is_crypt_device_available(old_dev_name): self.dev_name = old_dev_name LOG.debug("Using old encrypted volume name: %s", self.dev_name) elif wwn and wwn != old_dev_name: # FibreChannel device could be named '/dev/mapper/<WWN>'.
if self._is_crypt_device_available(wwn): self.dev_name = wwn LOG.debug("Using encrypted volume name from wwn: %s", self.dev_name) # the device's actual path on the compute host -- e.g., /dev/sd_ self.dev_path = os.path.realpath(self.symlink_path) def _is_crypt_device_available(self, dev_name): if not os.path.exists('/dev/mapper/%s' % dev_name): return False try: self._execute('cryptsetup', 'status', dev_name, run_as_root=True) except processutils.ProcessExecutionError as e: # If /dev/mapper/<dev_name> is a non-crypt block device (such as a # normal disk or multipath device), exit_code will be 1. In that # case, we will omit the warning message. if e.exit_code != 1: LOG.warning('cryptsetup status %(dev_name)s exited ' 'abnormally (status %(exit_code)s): %(err)s', {"dev_name": dev_name, "exit_code": e.exit_code, "err": e.stderr}) return False return True def _open_volume(self, passphrase, **kwargs): """Open the encrypted (plain dm-crypt) partition on the volume. :param passphrase: the passphrase used to access the volume """ LOG.debug("opening encrypted volume %s", self.dev_path) # NOTE(joel-coffman): cryptsetup will strip trailing newlines from # input specified on stdin unless --key-file=- is specified. cmd = ["cryptsetup", "create", "--key-file=-"] cipher = kwargs.get("cipher", None) if cipher is not None: cmd.extend(["--cipher", cipher]) key_size = kwargs.get("key_size", None) if key_size is not None: cmd.extend(["--key-size", key_size]) cmd.extend([self.dev_name, self.dev_path]) self._execute(*cmd, process_input=passphrase, check_exit_code=True, run_as_root=True, root_helper=self._root_helper) def attach_volume(self, context, **kwargs): """Shadow the device and pass an unencrypted version to the instance. Transparent disk encryption is achieved by mounting the volume via dm-crypt and passing the resulting device to the instance. The instance is unaware of the underlying encryption due to modifying the original symbolic link to refer to the device mounted by dm-crypt. """ # TODO(lyarwood): Remove this encryptor and refactor the LUKS based # encryptors in the U release. versionutils.report_deprecated_feature( LOG, "The plain CryptsetupEncryptor is deprecated and will be removed " "in a future release. Existing users are encouraged to retype " "any existing volumes using this encryptor to the 'luks' " "LuksEncryptor or 'luks2' Luks2Encryptor encryptors as soon as " "possible.") passphrase = self._get_encryption_key_as_passphrase(context) self._open_volume(passphrase, **kwargs) # modify the original symbolic link to refer to the decrypted device self._execute('ln', '--symbolic', '--force', '/dev/mapper/%s' % self.dev_name, self.symlink_path, root_helper=self._root_helper, run_as_root=True, check_exit_code=True) def _close_volume(self, **kwargs): """Closes the device (effectively removes the dm-crypt mapping).""" LOG.debug("closing encrypted volume %s", self.dev_path) # NOTE(mdbooth): remove will return 4 (wrong device specified) if # the device doesn't exist. We assume here that the caller hasn't # specified the wrong device, and that it doesn't exist because it # isn't open. We don't fail in this case in order to make this # operation idempotent.
self._execute('cryptsetup', 'remove', self.dev_name, run_as_root=True, check_exit_code=[0, 4], root_helper=self._root_helper) def detach_volume(self, **kwargs): """Removes the dm-crypt mapping for the device.""" self._close_volume(**kwargs) def extend_volume(self, context, **kwargs): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/encryptors/luks.py0000664000175000017500000002564600000000000020765 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_concurrency import processutils from oslo_log import log as logging from os_brick.encryptors import base from os_brick import exception from os_brick.privileged import rootwrap as priv_rootwrap from os_brick import utils LOG = logging.getLogger(__name__) def is_luks(root_helper, device, execute=None): """Checks if the specified device uses LUKS for encryption. :param device: the device to check :returns: true if the specified device uses LUKS; false otherwise """ try: # check to see if the device uses LUKS: exit status is 0 # if the device is a LUKS partition and non-zero if not if execute is None: execute = priv_rootwrap.execute execute('cryptsetup', 'isLuks', '--verbose', device, run_as_root=True, root_helper=root_helper, check_exit_code=True) return True except processutils.ProcessExecutionError as e: LOG.warning("isLuks exited abnormally (status %(exit_code)s): " "%(stderr)s", {"exit_code": e.exit_code, "stderr": e.stderr}) return False class LuksEncryptor(base.VolumeEncryptor): """A VolumeEncryptor based on LUKS. This VolumeEncryptor uses dm-crypt to encrypt the specified volume. """ def __init__(self, root_helper, connection_info, keymgr, execute=None, *args, **kwargs): super(LuksEncryptor, self).__init__( root_helper=root_helper, connection_info=connection_info, keymgr=keymgr, execute=execute, *args, **kwargs) # Fail if no device_path was set when connecting the volume, e.g. in # the case of libvirt network volume drivers. data = connection_info['data'] if not data.get('device_path'): volume_id = data.get('volume_id') or connection_info.get('serial') raise exception.VolumeEncryptionNotSupported( volume_id=volume_id, volume_type=connection_info['driver_volume_type']) # the device's path as given to libvirt -- e.g., /dev/disk/by-path/... self.symlink_path = connection_info['data']['device_path'] # a unique name for the volume -- e.g., the iSCSI participant name self.dev_name = 'crypt-%s' % os.path.basename(self.symlink_path) # NOTE(lixiaoy1): This is to import fix for 1439869 from Nova. # NOTE(tsekiyama): In older version of nova, dev_name was the same # as the symlink name. Now it has 'crypt-' prefix to avoid conflict # with multipath device symlink. To enable rolling update, we use the # old name when the encrypted volume already exists. 
old_dev_name = os.path.basename(self.symlink_path) wwn = data.get('multipath_id') if self._is_crypt_device_available(old_dev_name): self.dev_name = old_dev_name LOG.debug("Using old encrypted volume name: %s", self.dev_name) elif wwn and wwn != old_dev_name: # FibreChannel device could be named '/dev/mapper/<WWN>'. if self._is_crypt_device_available(wwn): self.dev_name = wwn LOG.debug( "Using encrypted volume name from wwn: %s", self.dev_name) # the device's actual path on the compute host -- e.g., /dev/sd_ self.dev_path = os.path.realpath(self.symlink_path) def _is_crypt_device_available(self, dev_name): if not os.path.exists('/dev/mapper/%s' % dev_name): return False try: self._execute('cryptsetup', 'status', dev_name, run_as_root=True) except processutils.ProcessExecutionError as e: # If /dev/mapper/<dev_name> is a non-crypt block device (such as a # normal disk or multipath device), exit_code will be 1. In that # case, we will omit the warning message. if e.exit_code != 1: LOG.warning('cryptsetup status %(dev_name)s exited ' 'abnormally (status %(exit_code)s): %(err)s', {"dev_name": dev_name, "exit_code": e.exit_code, "err": e.stderr}) return False return True def _format_volume(self, passphrase, **kwargs): """Creates a LUKS v1 header on the volume. :param passphrase: the passphrase used to access the volume """ self._format_luks_volume(passphrase, 'luks1', **kwargs) def _format_luks_volume(self, passphrase, version, **kwargs): """Creates a LUKS header of a given version or type on the volume. :param passphrase: the passphrase used to access the volume :param version: the LUKS version or type to use: one of `luks`, `luks1`, or `luks2`. Be aware that `luks` gives you the default LUKS format preferred by the particular cryptsetup being used (depends on version and compile time parameters), which could be either LUKS1 or LUKS2, so it's better to be specific about what you want here """ LOG.debug("formatting encrypted volume %s", self.dev_path) # NOTE(joel-coffman): cryptsetup will strip trailing newlines from # input specified on stdin unless --key-file=- is specified. cmd = ["cryptsetup", "--batch-mode", "luksFormat", "--type", version, "--key-file=-"] cipher = kwargs.get("cipher", None) if cipher is not None: cmd.extend(["--cipher", cipher]) key_size = kwargs.get("key_size", None) if key_size is not None: cmd.extend(["--key-size", key_size]) cmd.extend([self.dev_path]) self._execute(*cmd, process_input=passphrase, check_exit_code=True, run_as_root=True, root_helper=self._root_helper, attempts=3) def _open_volume(self, passphrase, **kwargs): """Opens the LUKS partition on the volume using passphrase. :param passphrase: the passphrase used to access the volume """ LOG.debug("opening encrypted volume %s", self.dev_path) self._execute('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path, self.dev_name, process_input=passphrase, run_as_root=True, check_exit_code=True, root_helper=self._root_helper) def attach_volume(self, context, **kwargs): """Shadow the device and pass an unencrypted version to the instance. Transparent disk encryption is achieved by mounting the volume via dm-crypt and passing the resulting device to the instance. The instance is unaware of the underlying encryption due to modifying the original symbolic link to refer to the device mounted by dm-crypt.
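        If the device has never been formatted, the first ``luksOpen``
        attempt fails and the volume is formatted as LUKS v1 before
        retrying (see ``_format_volume``).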
""" passphrase = self._get_encryption_key_as_passphrase(context) try: self._open_volume(passphrase, **kwargs) except processutils.ProcessExecutionError as e: if e.exit_code == 1 and not is_luks(self._root_helper, self.dev_path, execute=self._execute): # the device has never been formatted; format it and try again LOG.info("%s is not a valid LUKS device;" " formatting device for first use", self.dev_path) self._format_volume(passphrase, **kwargs) self._open_volume(passphrase, **kwargs) else: raise # modify the original symbolic link to refer to the decrypted device self._execute('ln', '--symbolic', '--force', '/dev/mapper/%s' % self.dev_name, self.symlink_path, root_helper=self._root_helper, run_as_root=True, check_exit_code=True) def _close_volume(self, **kwargs): """Closes the device (effectively removes the dm-crypt mapping).""" LOG.debug("closing encrypted volume %s", self.dev_path) # NOTE(mdbooth): luksClose will return 4 (wrong device specified) if # the device doesn't exist. We assume here that the caller hasn't # specified the wrong device, and that it doesn't exist because it # isn't open. We don't fail in this case in order to make this # operation idempotent. self._execute('cryptsetup', 'luksClose', self.dev_name, run_as_root=True, check_exit_code=[0, 4], root_helper=self._root_helper, attempts=3) def detach_volume(self, **kwargs): """Removes the dm-crypt mapping for the device.""" self._close_volume(**kwargs) def extend_volume(self, context, **kwargs): """Extend an encrypted volume and return the decrypted volume size.""" symlink = self.symlink_path LOG.debug('Resizing mapping %s to match underlying device', symlink) passphrase = self._get_encryption_key_as_passphrase(context) self._execute('cryptsetup', 'resize', symlink, process_input=passphrase, run_as_root=True, check_exit_code=True, root_helper=self._root_helper) res = utils.get_device_size(self, symlink) LOG.debug('New size of mapping is %s', res) return res class Luks2Encryptor(LuksEncryptor): """A VolumeEncryptor based on LUKS v2. This VolumeEncryptor uses dm-crypt to encrypt the specified volume. """ def __init__(self, root_helper, connection_info, keymgr, execute=None, *args, **kwargs): super(Luks2Encryptor, self).__init__( root_helper=root_helper, connection_info=connection_info, keymgr=keymgr, execute=execute, *args, **kwargs) # type: ignore def _format_volume(self, passphrase, **kwargs): """Creates a LUKS v2 header on the volume. :param passphrase: the passphrase used to access the volume """ self._format_luks_volume(passphrase, 'luks2', **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/encryptors/nop.py0000664000175000017500000000310500000000000020565 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from os_brick.encryptors import base class NoOpEncryptor(base.VolumeEncryptor): """A VolumeEncryptor that does nothing. This class exists solely to wrap regular (i.e., unencrypted) volumes so that they do not require special handling with respect to an encrypted volume. This implementation performs no action when a volume is attached or detached. """ def __init__(self, root_helper, connection_info, keymgr, execute=None, *args, **kwargs): super(NoOpEncryptor, self).__init__( root_helper=root_helper, connection_info=connection_info, keymgr=keymgr, execute=execute, *args, **kwargs) def attach_volume(self, context, **kwargs): pass def detach_volume(self, **kwargs): pass def extend_volume(self, context, **kwargs): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/exception.py0000664000175000017500000001655400000000000017573 0ustar00zuulzuul00000000000000# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Exceptions for the Brick library.""" from __future__ import annotations import traceback from typing import Any, Optional from oslo_concurrency import processutils as putils from oslo_log import log as logging from os_brick.i18n import _ LOG = logging.getLogger(__name__) class BrickException(Exception): """Base Brick Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. """ message = _("An unknown exception occurred.") code = 500 headers: dict = {} safe = False def __init__(self, message=None, **kwargs): self.kwargs = kwargs if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except AttributeError: pass if not message: try: message = self.message % kwargs except Exception: # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception("Exception in string format operation. " "msg='%s'", self.message) for name, value in kwargs.items(): LOG.error("%(name)s: %(value)s", {'name': name, 'value': value}) # at least get the core message out if something happened message = self.message # Put the message in 'msg' so that we can access it. If we have it in # message it will be overshadowed by the class' message attribute self.msg = message super(BrickException, self).__init__(message) class NotFound(BrickException): message = _("Resource could not be found.") code = 404 safe = True class Invalid(BrickException): message = _("Unacceptable parameters.") code = 400 # Cannot be templated as the error syntax varies. # msg needs to be constructed when raised. 
class InvalidParameterValue(Invalid): message = _("%(err)s") class NoFibreChannelHostsFound(BrickException): message = _("We are unable to locate any Fibre Channel devices.") class NoFibreChannelVolumeDeviceFound(BrickException): message = _("Unable to find a Fibre Channel volume device.") class VolumeNotDeactivated(BrickException): message = _('Volume %(name)s was not deactivated in time.') class VolumeDeviceNotFound(BrickException): message = _("Volume device not found at %(device)s.") class VolumePathsNotFound(BrickException): message = _("Could not find any paths for the volume.") class VolumePathNotRemoved(BrickException): message = _("Volume path %(volume_path)s was not removed in time.") class ProtocolNotSupported(BrickException): message = _("Connect to volume via protocol %(protocol)s not supported.") class TargetPortalNotFound(BrickException): message = _("Unable to find target portal %(target_portal)s.") class TargetPortalsNotFound(TargetPortalNotFound): message = _("Unable to find target portal in %(target_portals)s.") class FailedISCSITargetPortalLogin(BrickException): message = _("Unable to login to iSCSI Target Portal") class BlockDeviceReadOnly(BrickException): message = _("Block device %(device)s is Read-Only.") class VolumeGroupNotFound(BrickException): message = _("Unable to find Volume Group: %(vg_name)s") class VolumeGroupCreationFailed(BrickException): message = _("Failed to create Volume Group: %(vg_name)s") class CommandExecutionFailed(BrickException): message = _("Failed to execute command %(cmd)s") class VolumeDriverException(BrickException): message = _('An error occurred while IO to volume %(name)s.') class InvalidIOHandleObject(BrickException): message = _('IO handle of %(protocol)s has wrong object ' 'type %(actual_type)s.') class VolumeEncryptionNotSupported(Invalid): message = _("Volume encryption is not supported for %(volume_type)s " "volume %(volume_id)s.") class VolumeLocalCacheNotSupported(Invalid): message = _("Volume local cache is not supported for %(volume_type)s " "volume %(volume_id)s.") # NOTE(mriedem): This extends ValueError to maintain backward compatibility. class InvalidConnectorProtocol(ValueError): pass class ExceptionChainer(BrickException): """An Exception that can contain a group of exceptions. This exception serves as a container for exceptions, useful when we want to store all exceptions that happened during a series of steps and then raise them all together as one. The representation of the exception will include all exceptions and their tracebacks. This class also includes a context manager for convenience, one that will support both swallowing the exception as if nothing had happened and raising the exception. In both cases the exception will be stored. If a message is provided to the context manager it will be formatted and logged with warning level.
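    A minimal sketch of the intended pattern (the names are illustrative)::

        exc = ExceptionChainer()
        for device in devices:
            with exc.context(True, 'Failed to remove device %s', device):
                remove_device(device)
        if exc:  # true if at least one step failed
            raise exc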
""" def __init__(self, *args, **kwargs): self._exceptions: list[tuple] = [] self._repr: Optional[str] = None self._exc_msg_args = [] super(ExceptionChainer, self).__init__(*args, **kwargs) def __repr__(self): # Since generating the representation can be slow we cache it if not self._repr: tracebacks = ( ''.join(traceback.format_exception(*e)).replace('\n', '\n\t') for e in self._exceptions) self._repr = '\n'.join('\nChained Exception #%s\n\t%s' % (i + 1, t) for i, t in enumerate(tracebacks)) return self._repr __str__ = __repr__ def __nonzero__(self) -> bool: # We want to be able to do boolean checks on the exception return bool(self._exceptions) __bool__ = __nonzero__ # For Python 3 def add_exception(self, exc_type, exc_val, exc_tb) -> None: # Clear the representation cache self._repr = None self._exceptions.append((exc_type, exc_val, exc_tb)) def context(self, catch_exception: bool, msg: str = '', *msg_args: Any): self._catch_exception = catch_exception self._exc_msg = msg self._exc_msg_args = list(msg_args) return self def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): if exc_type: self.add_exception(exc_type, exc_val, exc_tb) if self._exc_msg: LOG.warning(self._exc_msg, *self._exc_msg_args) if self._catch_exception: return True class ExecutionTimeout(putils.ProcessExecutionError): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/executor.py0000664000175000017500000000606300000000000017425 0ustar00zuulzuul00000000000000# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Generic exec utility that allows us to set the execute and root_helper attributes for putils. Some projects need their own execute wrapper and root_helper settings, so this provides that hook. 
""" from __future__ import annotations import threading from typing import Callable from oslo_concurrency import processutils as putils from oslo_context import context as context_utils from oslo_utils import encodeutils from os_brick.privileged import rootwrap as priv_rootwrap class Executor(object): def __init__(self, root_helper, execute=None, *args, **kwargs): if execute is None: execute = priv_rootwrap.execute self.set_execute(execute) self.set_root_helper(root_helper) @staticmethod def safe_decode(string) -> str: return string and encodeutils.safe_decode(string, errors='ignore') @classmethod def make_putils_error_safe(cls, exc: putils.ProcessExecutionError) -> None: """Converts ProcessExecutionError string attributes to unicode.""" for field in ('stderr', 'stdout', 'cmd', 'description'): value = getattr(exc, field, None) if value: setattr(exc, field, cls.safe_decode(value)) def _execute(self, *args, **kwargs) -> tuple[str, str]: try: result = self.__execute(*args, **kwargs) if result: result = (self.safe_decode(result[0]), self.safe_decode(result[1])) return result except putils.ProcessExecutionError as e: self.make_putils_error_safe(e) raise def set_execute(self, execute: Callable) -> None: self.__execute = execute def set_root_helper(self, helper: str) -> None: self._root_helper = helper class Thread(threading.Thread): """Thread class that inherits the parent's context. This is useful when you are spawning a thread and want LOG entries to display the right context information, such as the request. """ def __init__(self, *args, **kwargs): # Store the caller's context as a private variable shared among threads self.__context__ = context_utils.get_current() super(Thread, self).__init__(*args, **kwargs) def run(self) -> None: # Store the context in the current thread's request store if self.__context__: self.__context__.update_store() super(Thread, self).run() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/i18n.py0000664000175000017500000000153100000000000016341 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See https://docs.openstack.org/oslo.i18n/latest/ . """ import oslo_i18n as i18n DOMAIN = 'os-brick' _translators = i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.603759 os_brick-6.11.0/os_brick/initiator/0000775000175000017500000000000000000000000017212 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/__init__.py0000664000175000017500000000307300000000000021326 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. 
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""Brick's Initiator module.

The initiator module contains the capabilities for discovering the initiator
information as well as discovering and removing volumes from a host.
"""

import re

DEVICE_SCAN_ATTEMPTS_DEFAULT = 3

MULTIPATH_ERROR_REGEX = re.compile(r"\w{3} \d+ \d\d:\d\d:\d\d \|.*$")
MULTIPATH_PATH_CHECK_REGEX = re.compile(r"\s+\d+:\d+:\d+:\d+\s+")

PLATFORM_ALL = 'ALL'
PLATFORM_x86 = 'X86'
PLATFORM_S390 = 'S390'
PLATFORM_PPC64 = 'PPC64'
OS_TYPE_ALL = 'ALL'
OS_TYPE_LINUX = 'LINUX'
OS_TYPE_WINDOWS = 'WIN'

S390X = "s390x"
S390 = "s390"
PPC64 = "ppc64"
PPC64LE = "ppc64le"

ISCSI = "ISCSI"
ISER = "ISER"
FIBRE_CHANNEL = "FIBRE_CHANNEL"
NFS = "NFS"
SMBFS = 'SMBFS'
GLUSTERFS = "GLUSTERFS"
LOCAL = "LOCAL"
HUAWEISDSHYPERVISOR = "HUAWEISDSHYPERVISOR"
RBD = "RBD"
SCALEIO = "SCALEIO"
SCALITY = "SCALITY"
QUOBYTE = "QUOBYTE"
VZSTORAGE = "VZSTORAGE"
VMDK = "VMDK"
GPFS = "GPFS"
STORPOOL = "STORPOOL"
NVME = "NVME"
NVMEOF = "NVMEOF"
LIGHTOS = "LIGHTOS"
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0
os_brick-6.11.0/os_brick/initiator/connector.py0000664000175000017500000002656400000000000021563 0ustar00zuulzuul00000000000000
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""Brick Connector objects for each supported transport protocol.

.. module:: connector

The connectors here are responsible for discovering and removing volumes for
each of the supported transport protocols.
"""

import platform
import socket
import sys

from oslo_log import log as logging
from oslo_utils import importutils

from os_brick import exception
from os_brick.i18n import _
from os_brick import initiator
from os_brick import utils

LOG = logging.getLogger(__name__)

# List of connectors to call when getting
# the connector properties for a host
windows_connector_list = [
    'os_brick.initiator.windows.base.BaseWindowsConnector',
    'os_brick.initiator.windows.iscsi.WindowsISCSIConnector',
    'os_brick.initiator.windows.fibre_channel.WindowsFCConnector',
    'os_brick.initiator.windows.rbd.WindowsRBDConnector',
    'os_brick.initiator.windows.smbfs.WindowsSMBFSConnector'
]

unix_connector_list = [
    'os_brick.initiator.connectors.base.BaseLinuxConnector',
    'os_brick.initiator.connectors.iscsi.ISCSIConnector',
    'os_brick.initiator.connectors.fibre_channel.FibreChannelConnector',
    ('os_brick.initiator.connectors.fibre_channel_s390x.'
     'FibreChannelConnectorS390X'),
    ('os_brick.initiator.connectors.fibre_channel_ppc64.'
'FibreChannelConnectorPPC64'), 'os_brick.initiator.connectors.remotefs.RemoteFsConnector', 'os_brick.initiator.connectors.rbd.RBDConnector', 'os_brick.initiator.connectors.local.LocalConnector', 'os_brick.initiator.connectors.gpfs.GPFSConnector', 'os_brick.initiator.connectors.huawei.HuaweiStorHyperConnector', 'os_brick.initiator.connectors.scaleio.ScaleIOConnector', 'os_brick.initiator.connectors.vmware.VmdkConnector', 'os_brick.initiator.connectors.storpool.StorPoolConnector', 'os_brick.initiator.connectors.nvmeof.NVMeOFConnector', 'os_brick.initiator.connectors.lightos.LightOSConnector', ] def _get_connector_list(): if sys.platform != 'win32': return unix_connector_list else: return windows_connector_list # Mappings used to determine who to construct in the factory _connector_mapping_linux = { initiator.GLUSTERFS: 'os_brick.initiator.connectors.remotefs.RemoteFsConnector', initiator.NFS: 'os_brick.initiator.connectors.remotefs.RemoteFsConnector', initiator.SCALITY: 'os_brick.initiator.connectors.remotefs.RemoteFsConnector', initiator.QUOBYTE: 'os_brick.initiator.connectors.remotefs.RemoteFsConnector', initiator.VZSTORAGE: 'os_brick.initiator.connectors.remotefs.RemoteFsConnector', initiator.ISCSI: 'os_brick.initiator.connectors.iscsi.ISCSIConnector', initiator.ISER: 'os_brick.initiator.connectors.iscsi.ISCSIConnector', initiator.FIBRE_CHANNEL: 'os_brick.initiator.connectors.fibre_channel.FibreChannelConnector', initiator.LOCAL: 'os_brick.initiator.connectors.local.LocalConnector', initiator.HUAWEISDSHYPERVISOR: 'os_brick.initiator.connectors.huawei.HuaweiStorHyperConnector', initiator.RBD: 'os_brick.initiator.connectors.rbd.RBDConnector', initiator.SCALEIO: 'os_brick.initiator.connectors.scaleio.ScaleIOConnector', initiator.VMDK: 'os_brick.initiator.connectors.vmware.VmdkConnector', initiator.GPFS: 'os_brick.initiator.connectors.gpfs.GPFSConnector', initiator.STORPOOL: 'os_brick.initiator.connectors.storpool.StorPoolConnector', # Leave this in for backwards compatibility # This isn't an NVME connector, but NVME Over Fabrics initiator.NVME: 'os_brick.initiator.connectors.nvmeof.NVMeOFConnector', initiator.NVMEOF: 'os_brick.initiator.connectors.nvmeof.NVMeOFConnector', initiator.LIGHTOS: 'os_brick.initiator.connectors.lightos.LightOSConnector', } # Mapping for the S390X platform _connector_mapping_linux_s390x = { initiator.FIBRE_CHANNEL: 'os_brick.initiator.connectors.fibre_channel_s390x.' 'FibreChannelConnectorS390X', initiator.NFS: 'os_brick.initiator.connectors.remotefs.RemoteFsConnector', initiator.ISCSI: 'os_brick.initiator.connectors.iscsi.ISCSIConnector', initiator.LOCAL: 'os_brick.initiator.connectors.local.LocalConnector', initiator.RBD: 'os_brick.initiator.connectors.rbd.RBDConnector', initiator.GPFS: 'os_brick.initiator.connectors.gpfs.GPFSConnector', } # Mapping for the PPC64 platform _connector_mapping_linux_ppc64 = { initiator.FIBRE_CHANNEL: ('os_brick.initiator.connectors.fibre_channel_ppc64.' 
        'FibreChannelConnectorPPC64'),
    initiator.NFS:
        'os_brick.initiator.connectors.remotefs.RemoteFsConnector',
    initiator.ISCSI:
        'os_brick.initiator.connectors.iscsi.ISCSIConnector',
    initiator.LOCAL:
        'os_brick.initiator.connectors.local.LocalConnector',
    initiator.RBD:
        'os_brick.initiator.connectors.rbd.RBDConnector',
    initiator.GPFS:
        'os_brick.initiator.connectors.gpfs.GPFSConnector',
    initiator.VZSTORAGE:
        'os_brick.initiator.connectors.remotefs.RemoteFsConnector',
    initiator.ISER:
        'os_brick.initiator.connectors.iscsi.ISCSIConnector',
}

# Mapping for the Windows connectors
_connector_mapping_windows = {
    initiator.ISCSI:
        'os_brick.initiator.windows.iscsi.WindowsISCSIConnector',
    initiator.FIBRE_CHANNEL:
        'os_brick.initiator.windows.fibre_channel.WindowsFCConnector',
    initiator.RBD:
        'os_brick.initiator.windows.rbd.WindowsRBDConnector',
    initiator.SMBFS:
        'os_brick.initiator.windows.smbfs.WindowsSMBFSConnector',
}


# Create aliases to the old names until 2.0.0
# TODO(smcginnis) Remove this lookup once unit test code is updated to
# point to the correct location
def _set_aliases():
    conn_list = _get_connector_list()

    # TODO(lpetrut): Cinder is explicitly trying to use those two
    # connectors. We should drop this once we fix Cinder and
    # get past the backwards compatibility period.
    if sys.platform == 'win32':
        conn_list += [
            'os_brick.initiator.connectors.iscsi.ISCSIConnector',
            ('os_brick.initiator.connectors.fibre_channel.'
             'FibreChannelConnector'),
        ]

    for item in conn_list:
        _name = item.split('.')[-1]
        globals()[_name] = importutils.import_class(item)


_set_aliases()


@utils.trace
def get_connector_properties(root_helper, my_ip, multipath,
                             enforce_multipath, host=None,
                             execute=None):
    """Get the connection properties for all protocols.

    When the connector wants to use multipath, multipath=True should be
    specified. If enforce_multipath=True is specified too, an exception is
    thrown when multipathd is not running. Otherwise, it falls back to
    multipath=False and only the first path that shows up is used.

    For compatibility reasons, even if multipath=False is specified, some
    Cinder storage drivers may export the target for multipath, which can be
    found via sendtargets discovery.

    :param root_helper: The command prefix for executing as root.
    :type root_helper: str
    :param my_ip: The IP address of the local host.
    :type my_ip: str
    :param multipath: Enable multipath?
    :type multipath: bool
    :param enforce_multipath: Should we enforce that the multipath daemon is
                              running? If the daemon isn't running then raise
                              ProcessExecutionError to the caller.
    :type enforce_multipath: bool
    :param host: hostname.
    :param execute: execute helper.
    :returns: dict containing all of the collected initiator values.
    """
    props = {}
    props['platform'] = platform.machine()
    props['os_type'] = sys.platform
    props['ip'] = my_ip
    props['host'] = host if host else socket.gethostname()
    for item in _get_connector_list():
        connector = importutils.import_class(item)

        if (utils.platform_matches(props['platform'], connector.platform) and
                utils.os_matches(props['os_type'], connector.os_type)):
            props = utils.merge_dict(props,
                                     connector.get_connector_properties(
                                         root_helper,
                                         host=host,
                                         multipath=multipath,
                                         enforce_multipath=enforce_multipath,
                                         execute=execute))

    return props


def get_connector_mapping(arch=None):
    """Get connector mapping based on platform.

    This is used by Nova to get the right connector information.

    :param arch: The architecture being requested.
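
    A hedged usage sketch (the arch value is illustrative):

        mapping = get_connector_mapping(arch='x86_64')
        path = mapping[initiator.ISCSI]   # dotted path of ISCSIConnector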
""" # We do this instead of assigning it in the definition # to help mocking for unit tests if arch is None: arch = platform.machine() # Set the correct mapping for imports if sys.platform == 'win32': return _connector_mapping_windows elif arch in (initiator.S390, initiator.S390X): return _connector_mapping_linux_s390x elif arch in (initiator.PPC64, initiator.PPC64LE): return _connector_mapping_linux_ppc64 else: return _connector_mapping_linux # TODO(walter-boring) We have to keep this class defined here # so we don't break backwards compatibility class InitiatorConnector(object): @staticmethod def factory(protocol, root_helper, driver=None, use_multipath=False, device_scan_attempts=initiator.DEVICE_SCAN_ATTEMPTS_DEFAULT, arch=None, *args, **kwargs): """Build a Connector object based upon protocol and architecture.""" _mapping = get_connector_mapping(arch) LOG.debug("Factory for %(protocol)s on %(arch)s", {'protocol': protocol, 'arch': arch}) protocol = protocol.upper() # set any special kwargs needed by connectors if protocol in (initiator.NFS, initiator.GLUSTERFS, initiator.SCALITY, initiator.QUOBYTE, initiator.VZSTORAGE): kwargs.update({'mount_type': protocol.lower()}) elif protocol == initiator.ISER: kwargs.update({'transport': 'iser'}) # now set all the default kwargs kwargs.update( {'root_helper': root_helper, 'driver': driver, 'use_multipath': use_multipath, 'device_scan_attempts': device_scan_attempts, }) connector = _mapping.get(protocol) if not connector: msg = (_("Invalid InitiatorConnector protocol " "specified %(protocol)s") % dict(protocol=protocol)) raise exception.InvalidConnectorProtocol(msg) conn_cls = importutils.import_class(connector) return conn_cls(*args, **kwargs) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.611759 os_brick-6.11.0/os_brick/initiator/connectors/0000775000175000017500000000000000000000000021367 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/__init__.py0000664000175000017500000000000000000000000023466 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/base.py0000664000175000017500000002074300000000000022661 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from __future__ import annotations import functools import glob import os import typing from typing import Optional from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import reflection from oslo_utils import timeutils from os_brick import exception from os_brick import initiator from os_brick.initiator import host_driver from os_brick.initiator import initiator_connector from os_brick.initiator import linuxscsi from os_brick import utils LOG = logging.getLogger(__name__) CONF = cfg.CONF def synchronized(name, lock_file_prefix='os-brick-', external=False, lock_path=None, semaphores=None, delay=0.01, fair=False, blocking=True): """os-brick synchronization decorator Like the one in lock_utils but defaulting the prefix to os-brick- and using our own lock_path. Cannot use lock_utils one because when using the default we don't know the value until setup has been called, which can be after the code using the decorator has been loaded. """ def wrap(f): @functools.wraps(f) def inner(*args, **kwargs): t1 = timeutils.now() t2 = None gotten = True lpath = lock_path or CONF.os_brick.lock_path # TODO: (AA Release) Remove this failsafe if not lpath and CONF.oslo_concurrency.lock_path: LOG.warning("Service needs to call os_brick.setup() before " "connecting volumes, if it doesn't it will break " "on the next release") lpath = CONF.oslo_concurrency.lock_path f_name = reflection.get_callable_name(f) try: LOG.debug('Acquiring lock "%s" by "%s"', name, f_name) with lockutils.lock(name, lock_file_prefix, external, lpath, do_log=False, semaphores=semaphores, delay=delay, fair=fair, blocking=blocking): t2 = timeutils.now() LOG.debug('Lock "%(name)s" acquired by "%(function)s" :: ' 'waited %(wait_secs)0.3fs', {'name': name, 'function': f_name, 'wait_secs': (t2 - t1)}) return f(*args, **kwargs) except lockutils.AcquireLockFailedException: gotten = False finally: t3 = timeutils.now() if t2 is None: held_secs = "N/A" else: held_secs = "%0.3fs" % (t3 - t2) LOG.debug('Lock "%(name)s" "%(gotten)s" by "%(function)s" ::' ' held %(held_secs)s', {'name': name, 'gotten': 'released' if gotten else 'unacquired', 'function': f_name, 'held_secs': held_secs}) return inner return wrap class BaseLinuxConnector(initiator_connector.InitiatorConnector): os_type = initiator.OS_TYPE_LINUX def __init__(self, root_helper: str, driver=None, execute=None, *args, **kwargs): self._linuxscsi = linuxscsi.LinuxSCSI(root_helper, execute=execute) if not driver: driver = host_driver.HostDriver() self.set_driver(driver) super(BaseLinuxConnector, self).__init__(root_helper, execute=execute, *args, **kwargs) @staticmethod def get_connector_properties(root_helper: str, *args, **kwargs) -> dict: """The generic connector properties.""" # The 'multipath' and 'enforce_multipath' values will be used by # the caller to verify multipathing in connect_volume. return { 'multipath': kwargs['multipath'], 'enforce_multipath': kwargs['enforce_multipath'], } def supports_multipath(self): """Generic method to report multipath support. Each connector, which supports multipath, should override this method and provide its own implementation of checking the multipath support. See implementation in iSCSI, FC or NVMe connectors for reference. 
""" return False def check_multipath(self, connection_properties): LOG.debug("Connection properties %s", connection_properties) multipath = self.use_multipath # If we are using an old cinder, it will not contain the # 'enforce_multipath' key and we will default the value to False. # Unfortunately, there is is no way to know which Cinder # version we are using when calling get_connector_properties to # keep backward compatibility. enforce_multipath = connection_properties.get( 'enforce_multipath', False) if not self.supports_multipath(): if multipath and enforce_multipath: raise exception.BrickException( "Multipathing is enforced but the host doesn't " "support multipathing.") if multipath and not enforce_multipath: LOG.warning( "Multipathing is requested but the host " "doesn't support multipathing.") def check_valid_device(self, path: str, run_as_root: bool = True) -> bool: return utils.check_valid_device(self, path) def get_all_available_volumes( self, connection_properties: Optional[dict] = None) -> list: volumes = [] path = self.get_search_path() if path: # now find all entries in the search path if os.path.isdir(path): path_items = [path, '/*'] file_filter = ''.join(path_items) volumes = glob.glob(file_filter) return volumes def _discover_mpath_device(self, device_wwn: str, connection_properties: dict, device_name: str) -> tuple[str, str]: """This method discovers a multipath device. Discover a multipath device based on a defined connection_property and a device_wwn and return the multipath_id and path of the multipath enabled device if there is one. """ path = self._linuxscsi.find_multipath_device_path(device_wwn) device_path = None multipath_id = None if path is None: # find_multipath_device only accept realpath not symbolic path device_realpath = os.path.realpath(device_name) mpath_info = self._linuxscsi.find_multipath_device( device_realpath) if mpath_info: device_path = mpath_info['device'] multipath_id = device_wwn else: # we didn't find a multipath device. # so we assume the kernel only sees 1 device device_path = device_name LOG.debug("Unable to find multipath device name for " "volume. Using path %(device)s for volume.", {'device': device_path}) else: device_path = path multipath_id = device_wwn if connection_properties.get('access_mode', '') != 'ro': try: # Sometimes the multipath devices will show up as read only # initially and need additional time/rescans to get to RW. self._linuxscsi.wait_for_rw(device_wwn, device_path) except exception.BlockDeviceReadOnly: LOG.warning('Block device %s is still read-only. ' 'Continuing anyway.', device_path) device_path = typing.cast(str, device_path) multipath_id = typing.cast(str, multipath_id) return device_path, multipath_id ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/base_iscsi.py0000664000175000017500000000437400000000000024055 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from __future__ import annotations import copy from typing import Any, Generator from os_brick.initiator import initiator_connector class BaseISCSIConnector(initiator_connector.InitiatorConnector): def _iterate_all_targets( self, connection_properties: dict) -> Generator[dict[str, Any], None, None]: for portal, iqn, lun in self._get_all_targets(connection_properties): props = copy.deepcopy(connection_properties) props['target_portal'] = portal props['target_iqn'] = iqn props['target_lun'] = lun for key in ('target_portals', 'target_iqns', 'target_luns'): props.pop(key, None) yield props @staticmethod def _get_luns(con_props: dict, iqns=None) -> list: luns = con_props.get('target_luns') num_luns = len(con_props['target_iqns']) if iqns is None else len(iqns) return luns or [con_props['target_lun']] * num_luns def _get_all_targets( self, connection_properties: dict) -> \ list[tuple[str, str, list | str | int]]: if all(key in connection_properties for key in ('target_portals', 'target_iqns')): return list(zip(connection_properties['target_portals'], connection_properties['target_iqns'], self._get_luns(connection_properties))) return [(connection_properties['target_portal'], connection_properties['target_iqn'], connection_properties.get('target_lun', 0))] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/base_rbd.py0000664000175000017500000000374600000000000023514 0ustar00zuulzuul00000000000000# Copyright 2020 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import annotations from typing import Any, Optional from oslo_log import log as logging from oslo_utils import netutils LOG = logging.getLogger(__name__) class RBDConnectorMixin(object): """Mixin covering cross platform RBD connector functionality""" @staticmethod def _sanitize_mon_hosts(hosts: list[str]) -> list[str]: def _sanitize_host(host: str) -> str: if netutils.is_valid_ipv6(host): host = '[%s]' % host return host return list(map(_sanitize_host, hosts)) @classmethod def _get_rbd_args(cls, connection_properties: dict[str, Any], conf: Optional[str] = None) -> list[str]: user = connection_properties.get('auth_username') monitor_ips = connection_properties.get('hosts') monitor_ports = connection_properties.get('ports') args: list[str] = [] if user: args = ['--id', user] if monitor_ips and monitor_ports: monitors = ["%s:%s" % (ip, port) for ip, port in zip( cls._sanitize_mon_hosts(monitor_ips), monitor_ports)] for monitor in monitors: args += ['--mon_host', monitor] if conf: args += ['--conf', conf] return args ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/fake.py0000664000175000017500000000313500000000000022651 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from os_brick.initiator.connectors import base from os_brick.initiator.connectors import base_iscsi class FakeConnector(base.BaseLinuxConnector): fake_path = '/dev/vdFAKE' def connect_volume(self, connection_properties): fake_device_info = {'type': 'fake', 'path': self.fake_path} return fake_device_info def disconnect_volume(self, connection_properties, device_info, force=False, ignore_errors=False): pass def get_volume_paths(self, connection_properties): return [self.fake_path] def get_search_path(self): return '/dev/disk/by-path' def extend_volume(self, connection_properties): return None def get_all_available_volumes(self, connection_properties=None): return ['/dev/disk/by-path/fake-volume-1', '/dev/disk/by-path/fake-volume-X'] class FakeBaseISCSIConnector(FakeConnector, base_iscsi.BaseISCSIConnector): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/fibre_channel.py0000664000175000017500000004707600000000000024536 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
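# NOTE: a hedged sketch (WWNs and LUN illustrative) of what
# _add_targets_to_connection_properties() below derives from the
# driver-supplied properties. Given:
#
#     {'target_wwns': ['500A098280FEEBA5', '500A098290FEEBA5'],
#      'target_lun': 1}
#
# it lower-cases the WWNs and, since there is a single LUN for multiple
# ports, pairs that LUN with every port:
#
#     'targets': [('500a098280feeba5', 1), ('500a098290feeba5', 1)]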
from __future__ import annotations import os import typing from typing import Any, Optional from oslo_log import log as logging from oslo_service import loopingcall from os_brick import exception from os_brick.i18n import _ from os_brick import initiator from os_brick.initiator.connectors import base from os_brick.initiator import linuxfc from os_brick import utils LOG = logging.getLogger(__name__) class FibreChannelConnector(base.BaseLinuxConnector): """Connector class to attach/detach Fibre Channel volumes.""" def __init__( self, root_helper: str, driver=None, execute: Optional[str] = None, use_multipath: bool = False, device_scan_attempts: int = initiator.DEVICE_SCAN_ATTEMPTS_DEFAULT, *args, **kwargs): self._linuxfc = linuxfc.LinuxFibreChannel(root_helper, execute) super(FibreChannelConnector, self).__init__( root_helper, driver=driver, execute=execute, device_scan_attempts=device_scan_attempts, *args, **kwargs) # type: ignore self.use_multipath = use_multipath self.device_name: Optional[str] self.host_device: Optional[str] self.tries: int def set_execute(self, execute) -> None: super(FibreChannelConnector, self).set_execute(execute) self._linuxscsi.set_execute(execute) self._linuxfc.set_execute(execute) @staticmethod def get_connector_properties( root_helper: str, *args, **kwargs) -> dict[str, Any]: """The Fibre Channel connector properties.""" props = {} fc = linuxfc.LinuxFibreChannel(root_helper, execute=kwargs.get('execute')) wwpns = fc.get_fc_wwpns() if wwpns: props['wwpns'] = wwpns wwnns = fc.get_fc_wwnns() if wwnns: props['wwnns'] = wwnns return props def get_search_path(self) -> str: """Where do we look for FC based volumes.""" return '/dev/disk/by-path' def _add_targets_to_connection_properties( self, connection_properties: dict) -> dict: LOG.debug('Adding targets to connection properties receives: %s', connection_properties) target_wwn = connection_properties.get('target_wwn') target_wwns = connection_properties.get('target_wwns') if target_wwns: wwns = target_wwns elif isinstance(target_wwn, list): wwns = target_wwn elif isinstance(target_wwn, str): wwns = [target_wwn] else: wwns = [] # Convert wwns to lower case wwns = [wwn.lower() for wwn in wwns] if target_wwns: connection_properties['target_wwns'] = wwns elif target_wwn: connection_properties['target_wwn'] = wwns target_lun = connection_properties.get('target_lun', 0) target_luns = connection_properties.get('target_luns') if target_luns: luns = target_luns elif isinstance(target_lun, int): luns = [target_lun] else: luns = [] if len(luns) == len(wwns): # Handles single wwn + lun or multiple, potentially # different wwns or luns targets = list(zip(wwns, luns)) elif len(luns) == 1 and len(wwns) > 1: # For the case of multiple wwns, but a single lun (old path) targets = [(wwn, luns[0]) for wwn in wwns] else: # Something is wrong, this shouldn't happen. 
msg = _("Unable to find potential volume paths for FC device " "with luns: %(luns)s and wwns: %(wwns)s.") % { "luns": luns, "wwns": wwns} LOG.error(msg) raise exception.VolumePathsNotFound(msg) connection_properties['targets'] = targets wwpn_lun_map = {wwpn: lun for wwpn, lun in targets} # If there is an initiator_target_map we can update it too and generate # the initiator_target_lun_map from it if connection_properties.get('initiator_target_map') is not None: # Convert it to lower case itmap = connection_properties['initiator_target_map'] itmap = {k.lower(): [port.lower() for port in v] for k, v in itmap.items()} connection_properties['initiator_target_map'] = itmap itmaplun = dict() for init_wwpn, target_wwpns in itmap.items(): itmaplun[init_wwpn] = [(target_wwpn, wwpn_lun_map[target_wwpn]) for target_wwpn in target_wwpns if target_wwpn in wwpn_lun_map] # We added the if in the previous list comprehension in case # drivers return targets in the map that are not reported in # target_wwn or target_wwns, but we warn about it. if len(itmaplun[init_wwpn]) != len(itmap[init_wwpn]): unknown = set(itmap[init_wwpn]) unknown.difference_update(itmaplun[init_wwpn]) LOG.warning('Driver returned an unknown targets in the ' 'initiator mapping %s', ', '.join(unknown)) connection_properties['initiator_target_lun_map'] = itmaplun LOG.debug('Adding targets to connection properties returns: %s', connection_properties) return connection_properties def _get_possible_volume_paths( self, connection_properties: dict, hbas) -> list[str]: targets = connection_properties['targets'] addressing_mode = connection_properties.get('addressing_mode') possible_devs = self._get_possible_devices(hbas, targets, addressing_mode) host_paths = self._get_host_devices(possible_devs) return host_paths def get_volume_paths(self, connection_properties: dict) -> list[str]: volume_paths = [] # first fetch all of the potential paths that might exist # how the FC fabric is zoned may alter the actual list # that shows up on the system. So, we verify each path. hbas = self._linuxfc.get_fc_hbas_info() device_paths = self._get_possible_volume_paths( connection_properties, hbas) for path in device_paths: if os.path.exists(path): volume_paths.append(path) return volume_paths @utils.trace @base.synchronized('extend_volume', external=True) @utils.connect_volume_undo_prepare_result def extend_volume(self, connection_properties: dict) -> Optional[int]: """Update the local kernel's size information. Try and update the local kernel's size information for an FC volume. """ connection_properties = self._add_targets_to_connection_properties( connection_properties) volume_paths = self.get_volume_paths(connection_properties) if volume_paths: return self._linuxscsi.extend_volume( volume_paths, use_multipath=self.use_multipath) else: LOG.warning("Couldn't find any volume paths on the host to " "extend volume for %(props)s", {'props': connection_properties}) raise exception.VolumePathsNotFound() def supports_multipath(self): return self._linuxscsi.is_multipath_running( root_helper=self._root_helper) @utils.trace @utils.connect_volume_prepare_result @base.synchronized('connect_volume', external=True) def connect_volume(self, connection_properties: dict) -> dict: """Attach the volume to instance_name. :param connection_properties: The dictionary that describes all of the target volume attributes. 
:type connection_properties: dict :returns: dict connection_properties for Fibre Channel must include: target_wwn - World Wide Name target_lun - LUN id of the volume """ self.check_multipath(connection_properties) device_info = {'type': 'block'} connection_properties = self._add_targets_to_connection_properties( connection_properties) hbas = self._linuxfc.get_fc_hbas_info() if not hbas: LOG.warning("We are unable to locate any Fibre Channel devices.") raise exception.NoFibreChannelHostsFound() host_devices = self._get_possible_volume_paths( connection_properties, hbas) # The /dev/disk/by-path/... node is not always present immediately # We only need to find the first device. Once we see the first device # multipath will have any others. def _wait_for_device_discovery(host_devices: list[str]) -> None: for device in host_devices: LOG.debug("Looking for Fibre Channel dev %(device)s", {'device': device}) if os.path.exists(device) and self.check_valid_device(device): self.host_device = device # get the /dev/sdX device. This variable is maintained to # keep the same log output. self.device_name = os.path.realpath(device) raise loopingcall.LoopingCallDone() if self.tries >= self.device_scan_attempts: LOG.error("Fibre Channel volume device not found.") raise exception.NoFibreChannelVolumeDeviceFound() LOG.info("Fibre Channel volume device not yet found. " "Will rescan & retry. Try number: %(tries)s.", {'tries': self.tries}) self._linuxfc.rescan_hosts(hbas, connection_properties) self.tries = self.tries + 1 self.host_device = None self.device_name = None self.tries = 0 timer = loopingcall.FixedIntervalLoopingCall( _wait_for_device_discovery, host_devices) timer.start(interval=2).wait() self.host_device = typing.cast(str, self.host_device) LOG.debug("Found Fibre Channel volume %(name)s " "(after %(tries)s rescans.)", {'name': self.device_name, 'tries': self.tries}) # find out the WWN of the device device_wwn = self._linuxscsi.get_scsi_wwn(self.host_device) LOG.debug("Device WWN = '%(wwn)s'", {'wwn': device_wwn}) device_info['scsi_wwn'] = device_wwn # see if the new drive is part of a multipath # device. If so, we'll use the multipath device. if self.use_multipath: # Pass a symlink, not a real path, otherwise we'll get a real path # back if we don't find a multipath and we'll return that to the # caller, breaking Nova's encryption which requires a symlink. assert self.host_device is not None (device_path, multipath_id) = self._discover_mpath_device( device_wwn, connection_properties, self.host_device) if multipath_id: # only set the multipath_id if we found one device_info['multipath_id'] = multipath_id if self.device_name: device = os.path.basename(self.device_name) mpath = self._linuxscsi.find_sysfs_multipath_dm( [device]) if mpath: # Sometimes the multipath device doesn't show up # in time and we don't want to fail here. 
# Wait for multipath device to be ready for I/O self._linuxscsi.wait_for_mpath_device(mpath) else: device_path = self.host_device device_path = typing.cast(str, device_path) device_info['path'] = device_path return device_info def _get_host_devices(self, possible_devs: list) -> list[str]: """Compute the device paths on the system with an id, wwn, and lun :param possible_devs: list of (platform, pci_id, wwn, lun) tuples :return: list of device paths on the system based on the possible_devs """ host_devices = [] for platform, pci_num, target_wwn, lun in possible_devs: host_device = "/dev/disk/by-path/%spci-%s-fc-%s-lun-%s" % ( platform + '-' if platform else '', pci_num, target_wwn, self._linuxscsi.process_lun_id(lun)) host_devices.append(host_device) return host_devices def _get_possible_devices(self, hbas: list, targets: list, addressing_mode: Optional[str] = None) -> \ list[tuple[str, Any, str, int]]: """Compute the possible fibre channel device options. :param hbas: available hba devices. :param targets: tuple of possible wwn addresses and lun combinations. :returns: list of (platform, pci_id, wwn, lun) tuples Given one or more wwn (mac addresses for fibre channel) ports do the matrix math to figure out a set of pci device, wwn tuples that are potentially valid (they won't all be). This provides a search space for the device connection. """ raw_devices = [] for hba in hbas: platform, pci_num = self._get_pci_num(hba) if pci_num is not None: for wwn, lun in targets: lun = self._linuxscsi.lun_for_addressing(lun, addressing_mode) target_wwn = "0x%s" % wwn.lower() raw_devices.append((platform, pci_num, target_wwn, lun)) return raw_devices @utils.trace @base.synchronized('connect_volume', external=True) @utils.connect_volume_undo_prepare_result(unlink_after=True) def disconnect_volume(self, connection_properties: dict, device_info: dict, force: bool = False, ignore_errors: bool = False) -> None: """Detach the volume from instance_name. :param connection_properties: The dictionary that describes all of the target volume attributes. 
:type connection_properties: dict :param device_info: historical difference, but same as connection_props :type device_info: dict connection_properties for Fibre Channel must include: target_wwn - World Wide Name target_lun - LUN id of the volume """ exc = exception.ExceptionChainer() devices = [] wwn = None connection_properties = self._add_targets_to_connection_properties( connection_properties) volume_paths = self.get_volume_paths(connection_properties) mpath_path = None for path in volume_paths: real_path = self._linuxscsi.get_name_from_path(path) if (self.use_multipath and not mpath_path and self.check_valid_device(path)): wwn = self._linuxscsi.get_scsi_wwn(path) mpath_path = self._linuxscsi.find_multipath_device_path(wwn) if mpath_path: with exc.context(force, 'Flushing %s failed', mpath_path): self._linuxscsi.flush_multipath_device(mpath_path) real_path = typing.cast(str, real_path) dev_info = self._linuxscsi.get_device_info(real_path) devices.append(dev_info) # If flush failed, then remove it forcefully since force=True if mpath_path and exc: with exc.context(force, 'Removing multipath %s failed', mpath_path): mpath_name = os.path.basename(os.path.realpath(mpath_path)) self._linuxscsi.multipath_del_map(mpath_name) LOG.debug("devices to remove = %s", devices) self._remove_devices(connection_properties, devices, device_info, force, exc) if exc: LOG.warning('There were errors removing %s, leftovers may remain ' 'in the system', volume_paths) if not ignore_errors: raise exc def _remove_devices(self, connection_properties: dict, devices: list, device_info: dict, force: bool, exc) -> None: # There may have been more than 1 device mounted # by the kernel for this volume. We have to remove # all of them path_used = utils.get_dev_path(connection_properties, device_info) # NOTE: Due to bug #1897787 device_info may have a real path for some # single paths instead of a symlink as it should have, so it'll only # be a multipath if it was a symlink (not real path) and it wasn't a # single path symlink (those have filenames starting with pci-) # We don't use os.path.islink in case the file is no longer there. was_symlink = path_used.count(os.sep) > 2 # We check for /pci because that's the value we return for single # paths, whereas for multipaths we have multiple link formats. 
was_multipath = '/pci-' not in path_used and was_symlink for device in devices: with exc.context(force, 'Removing device %s failed', device): device_path = device['device'] flush = self._linuxscsi.requires_flush(device_path, path_used, was_multipath) self._linuxscsi.remove_scsi_device(device_path, flush=flush) def _get_pci_num(self, hba: Optional[dict]) -> tuple: # NOTE(walter-boring) # device path is in format of (FC and FCoE) : # /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2 # /sys/devices/pci0000:20/0000:20:03.0/0000:21:00.2/net/ens2f2/ctlr_2 # /host3/fc_host/host3 # we always want the value prior to the host or net value # on non x86_64 device, pci devices may be appended on platform device, # /sys/devices/platform/smb/smb:motherboard/80040000000.peu0-c0/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2 # noqa # so also return a platform id if it exists platform = None if hba is not None: if "device_path" in hba: device_path = hba['device_path'].split('/') has_platform = (len(device_path) > 3 and device_path[3] == 'platform') for index, value in enumerate(device_path): if has_platform and value.startswith('pci'): platform = "platform-%s" % device_path[index - 1] if value.startswith('net') or value.startswith('host'): return platform, device_path[index - 1] return None, None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/fibre_channel_ppc64.py0000664000175000017500000000367300000000000025545 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from os_brick import initiator from os_brick.initiator.connectors import fibre_channel LOG = logging.getLogger(__name__) class FibreChannelConnectorPPC64(fibre_channel.FibreChannelConnector): """Connector class to attach/detach Fibre Channel volumes on PPC64 arch.""" platform = initiator.PLATFORM_PPC64 def __init__(self, root_helper, driver=None, execute=None, use_multipath=False, device_scan_attempts=initiator.DEVICE_SCAN_ATTEMPTS_DEFAULT, *args, **kwargs): super(FibreChannelConnectorPPC64, self).__init__( root_helper, driver=driver, execute=execute, device_scan_attempts=device_scan_attempts, *args, **kwargs) self.use_multipath = use_multipath def set_execute(self, execute): super(FibreChannelConnectorPPC64, self).set_execute(execute) self._linuxscsi.set_execute(execute) self._linuxfc.set_execute(execute) def _get_host_devices(self, possible_devs, lun): host_devices = set() for pci_num, target_wwn in possible_devs: host_device = "/dev/disk/by-path/fc-%s-lun-%s" % ( target_wwn, self._linuxscsi.process_lun_id(lun)) host_devices.add(host_device) return list(host_devices) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/fibre_channel_s390x.py0000664000175000017500000001034400000000000025470 0ustar00zuulzuul00000000000000# All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from os_brick import initiator from os_brick.initiator.connectors import fibre_channel from os_brick.initiator import linuxfc LOG = logging.getLogger(__name__) class FibreChannelConnectorS390X(fibre_channel.FibreChannelConnector): """Connector class to attach/detach Fibre Channel volumes on S390X arch.""" platform = initiator.PLATFORM_S390 def __init__(self, root_helper, driver=None, execute=None, use_multipath=False, device_scan_attempts=initiator.DEVICE_SCAN_ATTEMPTS_DEFAULT, *args, **kwargs): super(FibreChannelConnectorS390X, self).__init__( root_helper, driver=driver, execute=execute, device_scan_attempts=device_scan_attempts, *args, **kwargs) LOG.debug("Initializing Fibre Channel connector for S390") self._linuxfc = linuxfc.LinuxFibreChannelS390X(root_helper, execute) self.use_multipath = use_multipath def set_execute(self, execute): super(FibreChannelConnectorS390X, self).set_execute(execute) self._linuxscsi.set_execute(execute) self._linuxfc.set_execute(execute) def _get_host_devices(self, possible_devs): host_devices = [] for pci_num, target_wwn, lun in possible_devs: host_device = self._get_device_file_path( pci_num, target_wwn, lun) # NOTE(arne_r) # LUN driver path is the same on all distros, so no need to have # multiple calls here self._linuxfc.configure_scsi_device(pci_num, target_wwn, self._get_lun_string(lun)) host_devices.extend(host_device) return host_devices def _get_lun_string(self, lun): target_lun = 0 if lun <= 0xffff: target_lun = "0x%04x000000000000" % lun elif lun <= 0xffffffff: target_lun = "0x%08x00000000" % lun return target_lun def _get_device_file_path(self, pci_num, target_wwn, lun): # NOTE(arne_r) # Need to add multiple possible ways to resolve device paths, # depending on OS. Since it gets passed to '_get_possible_volume_paths' # having a mismatch is not a problem host_device = [ # RHEL based "/dev/disk/by-path/ccw-%s-zfcp-%s:%s" % ( pci_num, target_wwn, self._get_lun_string(lun)), # Debian based (e.g. for storwize) "/dev/disk/by-path/ccw-%s-fc-%s-lun-%s" % ( pci_num, target_wwn, lun), # Debian based (e.g. 
for ds8k) "/dev/disk/by-path/ccw-%s-fc-%s-lun-%s" % ( pci_num, target_wwn, self._get_lun_string(lun)), ] return host_device def _remove_devices(self, connection_properties, devices, device_info, force, exc): hbas = self._linuxfc.get_fc_hbas_info() targets = connection_properties['targets'] addressing_mode = connection_properties.get('addressing_mode') possible_devs = self._get_possible_devices(hbas, targets, addressing_mode) for platform, pci_num, target_wwn, lun in possible_devs: target_lun = self._get_lun_string(lun) with exc.context(force, 'Removing device %s:%s:%s failed', pci_num, target_wwn, target_lun): self._linuxfc.deconfigure_scsi_device(pci_num, target_wwn, target_lun) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/gpfs.py0000664000175000017500000000307500000000000022705 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from os_brick.i18n import _ from os_brick.initiator.connectors import local from os_brick import utils class GPFSConnector(local.LocalConnector): """"Connector class to attach/detach File System backed volumes.""" @utils.trace def connect_volume(self, connection_properties): """Connect to a volume. :param connection_properties: The dictionary that describes all of the target volume attributes. connection_properties must include: device_path - path to the volume to be connected :type connection_properties: dict :returns: dict """ if 'device_path' not in connection_properties: msg = (_("Invalid connection_properties specified " "no device_path attribute.")) raise ValueError(msg) device_info = {'type': 'gpfs', 'path': connection_properties['device_path']} return device_info ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/huawei.py0000664000175000017500000001710600000000000023230 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
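# NOTE: a hedged sketch of the sds_cli interaction wrapped by the connector
# below. Commands take the form:
#
#     <cli_path> -c attach|detach|querydev -v <volume_id>
#
# and _analyze_output() parses the 'key=value' lines of stdout, e.g.:
#
#     ret_code=0
#     ret_desc=success
#     dev_addr=/dev/sdX
#
# into {'ret_code': '0', 'ret_desc': 'success', 'dev_addr': '/dev/sdX'}.
# The sample output values are illustrative only.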
import os from oslo_log import log as logging from os_brick import exception from os_brick.i18n import _ from os_brick.initiator.connectors import base from os_brick import utils LOG = logging.getLogger(__name__) class HuaweiStorHyperConnector(base.BaseLinuxConnector): """"Connector class to attach/detach SDSHypervisor volumes.""" attached_success_code = 0 has_been_attached_code = 50151401 attach_mnid_done_code = 50151405 vbs_unnormal_code = 50151209 not_mount_node_code = 50155007 iscliexist = True def __init__(self, root_helper, driver=None, *args, **kwargs): self.cli_path = os.getenv('HUAWEISDSHYPERVISORCLI_PATH') if not self.cli_path: self.cli_path = '/usr/local/bin/sds/sds_cli' LOG.debug("CLI path is not configured, using default %s.", self.cli_path) if not os.path.isfile(self.cli_path): self.iscliexist = False LOG.error('SDS CLI file not found, ' 'HuaweiStorHyperConnector init failed.') super(HuaweiStorHyperConnector, self).__init__(root_helper, driver=driver, *args, **kwargs) @staticmethod def get_connector_properties(root_helper, *args, **kwargs): """The HuaweiStor connector properties.""" return {} def get_search_path(self): # TODO(walter-boring): Where is the location on the filesystem to # look for Huawei volumes to show up? return None def get_all_available_volumes(self, connection_properties=None): # TODO(walter-boring): what to return here for all Huawei volumes ? return [] def get_volume_paths(self, connection_properties): volume_path = None try: volume_path = self._get_volume_path(connection_properties) except Exception: msg = _("Couldn't find a volume.") LOG.warning(msg) raise exception.BrickException(message=msg) return [volume_path] def _get_volume_path(self, connection_properties): out = self._query_attached_volume( connection_properties['volume_id']) if not out or int(out['ret_code']) != 0: msg = _("Couldn't find attached volume.") LOG.error(msg) raise exception.BrickException(message=msg) return out['dev_addr'] @utils.trace @base.synchronized('connect_volume', external=True) def connect_volume(self, connection_properties): """Connect to a volume. :param connection_properties: The dictionary that describes all of the target volume attributes. :type connection_properties: dict :returns: dict """ LOG.debug("Connect_volume connection properties: %s.", connection_properties) out = self._attach_volume(connection_properties['volume_id']) if not out or int(out['ret_code']) not in (self.attached_success_code, self.has_been_attached_code, self.attach_mnid_done_code): msg = (_("Attach volume failed, " "error code is %s") % out['ret_code']) raise exception.BrickException(message=msg) try: volume_path = self._get_volume_path(connection_properties) except Exception: msg = _("query attached volume failed or volume not attached.") LOG.error(msg) raise exception.BrickException(message=msg) device_info = {'type': 'block', 'path': volume_path} return device_info @utils.trace @base.synchronized('connect_volume', external=True) def disconnect_volume(self, connection_properties, device_info, force=False, ignore_errors=False): """Disconnect a volume from the local host. :param connection_properties: The dictionary that describes all of the target volume attributes. 
:type connection_properties: dict :param device_info: historical difference, but same as connection_props :type device_info: dict """ LOG.debug("Disconnect_volume: %s.", connection_properties) out = self._detach_volume(connection_properties['volume_id']) if not out or int(out['ret_code']) not in (self.attached_success_code, self.vbs_unnormal_code, self.not_mount_node_code): msg = (_("Disconnect_volume failed, " "error code is %s") % out['ret_code']) raise exception.BrickException(message=msg) def is_volume_connected(self, volume_name): """Check if volume already connected to host""" LOG.debug('Check if volume %s already connected to a host.', volume_name) out = self._query_attached_volume(volume_name) if out: return int(out['ret_code']) == 0 return False def _attach_volume(self, volume_name): return self._cli_cmd('attach', volume_name) def _detach_volume(self, volume_name): return self._cli_cmd('detach', volume_name) def _query_attached_volume(self, volume_name): return self._cli_cmd('querydev', volume_name) def _cli_cmd(self, method, volume_name): LOG.debug("Enter into _cli_cmd.") if not self.iscliexist: msg = _("SDS command line doesn't exist, " "can't execute SDS command.") raise exception.BrickException(message=msg) if not method or volume_name is None: return cmd = [self.cli_path, '-c', method, '-v', volume_name] out, clilog = self._execute(*cmd, run_as_root=False, root_helper=self._root_helper) analyse_result = self._analyze_output(out) LOG.debug('%(method)s volume returns %(analyse_result)s.', {'method': method, 'analyse_result': analyse_result}) if clilog: LOG.error("SDS CLI output some log: %s.", clilog) return analyse_result def _analyze_output(self, out): LOG.debug("Enter into _analyze_output.") if out: analyse_result = {} out_temp = out.split('\n') for line in out_temp: LOG.debug("Line is %s.", line) if line.find('=') != -1: key, val = line.split('=', 1) LOG.debug("%(key)s = %(val)s", {'key': key, 'val': val}) if key in ['ret_code', 'ret_desc', 'dev_addr']: analyse_result[key] = val return analyse_result else: return None def extend_volume(self, connection_properties): # TODO(walter-boring): is this possible? raise NotImplementedError ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/iscsi.py0000664000175000017500000016357100000000000023070 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
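# NOTE: illustrative example only; the values below are hypothetical. A
# minimal single-path iSCSI connection_properties dict, as consumed by
# ISCSIConnector.connect_volume() in this module, looks roughly like:
#     {'target_portal': '192.168.1.10:3260',
#      'target_iqn': 'iqn.2010-10.org.openstack:volume-uuid',
#      'target_lun': 1}
# Multipath attachments may instead use the plural keys target_portals,
# target_iqns and target_luns, as noted in the connect_volume() docstring.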
from __future__ import annotations from collections import defaultdict import copy import glob import os import re import time from typing import Any, Iterable, Optional, TypedDict, Union from oslo_concurrency import processutils as putils from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import strutils from os_brick import exception from os_brick import executor from os_brick.i18n import _ from os_brick import initiator from os_brick.initiator.connectors import base from os_brick.initiator.connectors import base_iscsi from os_brick.initiator import utils as initiator_utils from os_brick import utils LOG = logging.getLogger(__name__) class ConnData(TypedDict): stop_connecting: bool num_logins: int failed_logins: int stopped_threads: int found_devices: list[str] just_added_devices: list[str] class ISCSIConnector(base.BaseLinuxConnector, base_iscsi.BaseISCSIConnector): """Connector class to attach/detach iSCSI volumes.""" supported_transports = ['be2iscsi', 'bnx2i', 'cxgb3i', 'default', 'cxgb4i', 'qla4xxx', 'ocs', 'iser', 'tcp'] VALID_SESSIONS_PREFIX = ('tcp:', 'iser:') def __init__( self, root_helper: str, driver=None, execute=None, use_multipath: bool = False, device_scan_attempts: int = initiator.DEVICE_SCAN_ATTEMPTS_DEFAULT, transport: str = 'default', *args, **kwargs): super(ISCSIConnector, self).__init__( root_helper, driver=driver, execute=execute, device_scan_attempts=device_scan_attempts, transport=transport, *args, **kwargs) # type: ignore self.use_multipath: bool = use_multipath self.transport: str = self._validate_iface_transport(transport) @staticmethod def get_connector_properties(root_helper: str, *args, **kwargs) -> dict: """The iSCSI connector properties.""" props = {} iscsi = ISCSIConnector(root_helper=root_helper, execute=kwargs.get('execute')) initiator = iscsi.get_initiator() if initiator: props['initiator'] = initiator return props def get_search_path(self) -> str: """Where do we look for iSCSI based volumes.""" return '/dev/disk/by-path' def get_volume_paths(self, connection_properties: dict) -> list: """Get the list of existing paths for a volume. This method's job is to simply report what might/should already exist for a volume. We aren't trying to attach/discover a new volume, but find any existing paths for a volume we think is already attached. :param connection_properties: The dictionary that describes all of the target volume attributes. :type connection_properties: dict """ volume_paths: list = [] # if there are no sessions, then target_portal won't exist if (('target_portal' not in connection_properties) and ('target_portals' not in connection_properties)): return volume_paths # Don't try and connect to the portals in the list as # this can create empty iSCSI sessions to hosts if they # didn't exist previously. # We are simply trying to find any existing volumes with # already connected sessions. host_devices = self._get_potential_volume_paths(connection_properties) for path in host_devices: if os.path.exists(path): volume_paths.append(path) return volume_paths def _get_iscsi_sessions_full(self) -> list[tuple[str, str, str, str, str]]: """Get iSCSI session information as a list of tuples. 
Uses iscsiadm -m session and from a command output like tcp: [1] 192.168.121.250:3260,1 iqn.2010-10.org.openstack: volume- (non-flash) This method will drop the node type and return a list like this: [('tcp:', '1', '192.168.121.250:3260', '1', 'iqn.2010-10.org.openstack:volume-')] """ out, err = self._run_iscsi_session() if err: LOG.warning("iscsiadm stderr output when getting sessions: %s", err) # Parse and clean the output from iscsiadm, which is in the form of: # transport_name: [session_id] ip_address:port,tpgt iqn node_type lines: list[tuple[str, str, str, str, str]] = [] for line in out.splitlines(): if line: info = line.split() sid = info[1][1:-1] portal, tpgt = info[2].split(',') lines.append((info[0], sid, portal, tpgt, info[3])) return lines def _get_iscsi_nodes(self) -> list[tuple]: """Get iSCSI node information (portal, iqn) as a list of tuples. Uses iscsiadm -m node and from a command output like 192.168.121.250:3260,1 iqn.2010-10.org.openstack:volume This method will drop the tpgt and return a list like this: [('192.168.121.250:3260', 'iqn.2010-10.org.openstack:volume')] """ out, err = self._execute('iscsiadm', '-m', 'node', run_as_root=True, root_helper=self._root_helper, check_exit_code=False) if err: LOG.warning("Couldn't find iSCSI nodes because iscsiadm err: %s", err) return [] # Parse and clean the output from iscsiadm which is in the form of: # ip_address:port,tpgt iqn lines: list[tuple] = [] for line in out.splitlines(): if line: info = line.split() try: lines.append((info[0].split(',')[0], info[1])) except IndexError: pass return lines def _get_iscsi_sessions(self) -> list: """Return portals for all existing sessions.""" # entry: [tcp, [1], 192.168.121.250:3260,1 ...] return [entry[2] for entry in self._get_iscsi_sessions_full()] def _get_all_targets( self, connection_properties: dict) -> \ list[tuple[str, str, list | int | str]]: addressing_mode = connection_properties.get('addressing_mode') res = super()._get_all_targets(connection_properties) return [(portal, iqn, self._linuxscsi.lun_for_addressing(lun, addressing_mode)) for portal, iqn, lun in res] def _get_ips_iqns_luns( self, connection_properties: dict, discover: bool = True, is_disconnect_call: bool = False) -> list[tuple[Any, Any, Any]]: """Build a list of ips, iqns, and luns. Used when doing singlepath and multipath, and we have 4 cases: - All information is in the connection properties - We have to do an iSCSI discovery to get the information - We don't want to do another discovery and we query the discoverydb - Discovery failed because it was actually a single pathed attachment :param connection_properties: The dictionary that describes all of the target volume attributes. :type connection_properties: dict :param discover: Whether doing an iSCSI discovery is acceptable. :type discover: bool :param is_disconnect_call: Whether this is a call coming from a user disconnect_volume call or a call from some other operation's cleanup. :type is_disconnect_call: bool :returns: list of tuples of (ip, iqn, lun) """ # There are cases where we don't know if the local attach was done # using multipathing or single pathing, so assume multipathing. 
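        # For example (hypothetical values), two portals exporting the same
        # target would produce:
        #     [('192.168.1.10:3260', 'iqn.2004-04.com.example:tgt1', 1),
        #      ('192.168.1.11:3260', 'iqn.2004-04.com.example:tgt1', 1)]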
try: if ('target_portals' in connection_properties and 'target_iqns' in connection_properties): # Use targets specified by connection_properties ips_iqns_luns = self._get_all_targets(connection_properties) else: method = (self._discover_iscsi_portals if discover else self._get_discoverydb_portals) ips_iqns_luns = method(connection_properties) except exception.TargetPortalNotFound: # Discovery failed, on disconnect this will happen if we # are detaching a single pathed connection, so we use the # connection properties to return the tuple. if is_disconnect_call: return self._get_all_targets(connection_properties) raise except Exception: LOG.exception('Exception encountered during portal discovery') if 'target_portals' in connection_properties: raise exception.TargetPortalsNotFound( target_portals=connection_properties['target_portals']) if 'target_portal' in connection_properties: raise exception.TargetPortalNotFound( target_portal=connection_properties['target_portal']) raise if not connection_properties.get('target_iqns'): # There are two types of iSCSI multipath devices. One which # shares the same iqn between multiple portals, and the other # which use different iqns on different portals. # Try to identify the type by checking the iscsiadm output # if the iqn is used by multiple portals. If it is, it's # the former, so use the supplied iqn. Otherwise, it's the # latter, so try the ip,iqn combinations to find the targets # which constitutes the multipath device. main_iqn = connection_properties['target_iqn'] all_portals = {(ip, lun) for ip, iqn, lun in ips_iqns_luns} match_portals = {(ip, lun) for ip, iqn, lun in ips_iqns_luns if iqn == main_iqn} if len(all_portals) == len(match_portals): ips_iqns_luns = [(p[0], main_iqn, p[1]) for p in all_portals] return ips_iqns_luns def _get_potential_volume_paths(self, connection_properties: dict) -> list[str]: """Build a list of potential volume paths that exist. Given a list of target_portals in the connection_properties, a list of paths might exist on the system during discovery. This method's job is to build that list of potential paths for a volume that might show up. This is only used during get_volume_paths time, we are looking to find a list of existing volume paths for the connection_properties. In this case, we don't want to connect to the portal. If we blindly try and connect to a portal, it could create a new iSCSI session that didn't exist previously, and then leave it stale. :param connection_properties: The dictionary that describes all of the target volume attributes. 
:type connection_properties: dict :returns: list """ if self.use_multipath: LOG.info("Multipath discovery for iSCSI enabled") # Multipath installed, discovering other targets if available host_devices = self._get_device_path(connection_properties) else: LOG.info("Multipath discovery for iSCSI not enabled.") iscsi_sessions = self._get_iscsi_sessions() host_devices_set: set = set() for props in self._iterate_all_targets(connection_properties): # If we aren't trying to connect to the portal, we # want to find ALL possible paths from all of the # alternate portals if props['target_portal'] in iscsi_sessions: paths = self._get_device_path(props) host_devices_set.update(paths) host_devices = list(host_devices_set) return host_devices def set_execute(self, execute) -> None: super(ISCSIConnector, self).set_execute(execute) self._linuxscsi.set_execute(execute) def _validate_iface_transport(self, transport_iface: str) -> str: """Check that given iscsi_iface uses only supported transports Accepted transport names for provided iface param are be2iscsi, bnx2i, cxgb3i, cxgb4i, default, qla4xxx, ocs, iser or tcp. Note the difference between transport and iface; unlike default(iscsi_tcp)/iser, this is not one and the same for offloaded transports, where the default format is transport_name.hwaddress :param transport_iface: The iscsi transport type. :type transport_iface: str :returns: str """ # Note that default(iscsi_tcp) and iser do not require a separate # iface file, just the transport is enough and do not need to be # validated. This is not the case for the other entries in # supported_transports array. if transport_iface in ['default', 'iser']: return transport_iface # Will return (6) if iscsi_iface file was not found, or (2) if iscsid # could not be contacted out = self._run_iscsiadm_bare(['-m', 'iface', '-I', transport_iface], check_exit_code=[0, 2, 6])[0] or "" LOG.debug("iscsiadm %(iface)s configuration: stdout=%(out)s.", {'iface': transport_iface, 'out': out}) for data in [line.split() for line in out.splitlines()]: if data[0] == 'iface.transport_name': if data[2] in self.supported_transports: return transport_iface LOG.warning("No useable transport found for iscsi iface %s. " "Falling back to default transport.", transport_iface) return 'default' def _get_transport(self) -> str: return self.transport def _get_discoverydb_portals(self, connection_properties: dict) -> list[tuple]: """Retrieve iscsi portals information from the discoverydb. Example of discoverydb command output: SENDTARGETS: DiscoveryAddress: 192.168.1.33,3260 DiscoveryAddress: 192.168.1.2,3260 Target: iqn.2004-04.com.qnap:ts-831x:iscsi.cinder-20170531114245.9eff88 Portal: 192.168.1.3:3260,1 Iface Name: default Portal: 192.168.1.2:3260,1 Iface Name: default Target: iqn.2004-04.com.qnap:ts-831x:iscsi.cinder-20170531114447.9eff88 Portal: 192.168.1.3:3260,1 Iface Name: default Portal: 192.168.1.2:3260,1 Iface Name: default DiscoveryAddress: 192.168.1.38,3260 iSNS: No targets found. STATIC: No targets found. FIRMWARE: No targets found. :param connection_properties: The dictionary that describes all of the target volume attributes. :type connection_properties: dict :returns: list of tuples of (ip, iqn, lun) """ ip, port = connection_properties['target_portal'].rsplit(':', 1) # NOTE(geguileo): I don't know if IPv6 will be reported with [] # or not, so we'll make them optional. 
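        # e.g. (illustrative) '[fd00::1]' becomes the pattern
        # r'\[?fd00::1\]?' so both bracketed and unbracketed forms match.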
ip = ip.replace('[', r'\[?').replace(']', r'\]?') out = self._run_iscsiadm_bare(['-m', 'discoverydb', '-o', 'show', '-P', 1])[0] or "" regex = ''.join(('^SENDTARGETS:\n.*?^DiscoveryAddress: ', ip, ',', port, '.*?\n(.*?)^(?:DiscoveryAddress|iSNS):.*')) LOG.debug('Regex to get portals from discoverydb: %s', regex) info = re.search(regex, out, re.DOTALL | re.MULTILINE) ips = [] iqns = [] if info: iscsi_transport = ('iser' if self._get_transport() == 'iser' else 'default') iface = 'Iface Name: ' + iscsi_transport current_iqn = '' current_ip = '' for line in info.group(1).splitlines(): line = line.strip() if line.startswith('Target:'): current_iqn = line[8:] elif line.startswith('Portal:'): current_ip = line[8:].split(',')[0] elif line.startswith(iface): if current_iqn and current_ip: iqns.append(current_iqn) ips.append(current_ip) current_ip = '' if not iqns: raise exception.TargetPortalsNotFound( _('Unable to find target portals information on discoverydb.')) luns = self._get_luns(connection_properties, iqns) return list(zip(ips, iqns, luns)) def _discover_iscsi_portals(self, connection_properties: dict) -> list: out = None iscsi_transport = ('iser' if self._get_transport() == 'iser' else 'default') if connection_properties.get('discovery_auth_method'): try: self._run_iscsiadm_update_discoverydb(connection_properties, iscsi_transport) except putils.ProcessExecutionError as exception: # iscsiadm returns 6 for "db record not found" if exception.exit_code == 6: # Create a new record for this target and update the db self._run_iscsiadm_bare( ['-m', 'discoverydb', '-t', 'sendtargets', '-p', connection_properties['target_portal'], '-I', iscsi_transport, '--op', 'new'], check_exit_code=[0, 255]) self._run_iscsiadm_update_discoverydb( connection_properties ) else: LOG.error("Unable to find target portal: " "%(target_portal)s.", {'target_portal': connection_properties[ 'target_portal']}) raise old_node_startups = self._get_node_startup_values( connection_properties) out = self._run_iscsiadm_bare( ['-m', 'discoverydb', '-t', 'sendtargets', '-I', iscsi_transport, '-p', connection_properties['target_portal'], '--discover'], check_exit_code=[0, 255])[0] or "" self._recover_node_startup_values(connection_properties, old_node_startups) else: old_node_startups = self._get_node_startup_values( connection_properties) out = self._run_iscsiadm_bare( ['-m', 'discovery', '-t', 'sendtargets', '-I', iscsi_transport, '-p', connection_properties['target_portal']], check_exit_code=[0, 255])[0] or "" self._recover_node_startup_values(connection_properties, old_node_startups) ips, iqns = self._get_target_portals_from_iscsiadm_output(out) luns = self._get_luns(connection_properties, iqns) return list(zip(ips, iqns, luns)) def _run_iscsiadm_update_discoverydb( self, connection_properties: dict, iscsi_transport: str = 'default') -> tuple[str, str]: return self._execute( 'iscsiadm', '-m', 'discoverydb', '-t', 'sendtargets', '-I', iscsi_transport, '-p', connection_properties['target_portal'], '--op', 'update', '-n', "discovery.sendtargets.auth.authmethod", '-v', connection_properties['discovery_auth_method'], '-n', "discovery.sendtargets.auth.username", '-v', connection_properties['discovery_auth_username'], '-n', "discovery.sendtargets.auth.password", '-v', connection_properties['discovery_auth_password'], run_as_root=True, root_helper=self._root_helper) @utils.trace @base.synchronized('extend_volume', external=True) @utils.connect_volume_undo_prepare_result def extend_volume(self, connection_properties: dict) -> 
Optional[int]: """Update the local kernel's size information. Try and update the local kernel's size information for an iSCSI volume. """ LOG.info("Extend volume for %s", strutils.mask_dict_password(connection_properties)) volume_paths = self.get_volume_paths(connection_properties) LOG.info("Found paths for volume %s", volume_paths) if volume_paths: return self._linuxscsi.extend_volume( volume_paths, use_multipath=self.use_multipath) else: LOG.warning("Couldn't find any volume paths on the host to " "extend volume for %(props)s", {'props': strutils.mask_dict_password( connection_properties)}) raise exception.VolumePathsNotFound() def supports_multipath(self): return self._linuxscsi.is_multipath_running( root_helper=self._root_helper) @utils.trace @utils.connect_volume_prepare_result @base.synchronized('connect_volume', external=True) def connect_volume( self, connection_properties: dict) -> Optional[dict[str, str]]: """Attach the volume to instance_name. :param connection_properties: The valid dictionary that describes all of the target volume attributes. :type connection_properties: dict :returns: dict connection_properties for iSCSI must include: target_portal(s) - ip and optional port target_iqn(s) - iSCSI Qualified Name target_lun(s) - LUN id of the volume Note that plural keys may be used when use_multipath=True """ self.check_multipath(connection_properties) try: if self.use_multipath: return self._connect_multipath_volume(connection_properties) return self._connect_single_volume(connection_properties) except Exception: # NOTE(geguileo): By doing the cleanup here we ensure we only do # the logins once for multipath if they succeed, but retry if they # don't, which helps on bad network cases. with excutils.save_and_reraise_exception(): self._cleanup_connection(connection_properties, force=True) return None def _get_connect_result(self, con_props: dict, wwn: str, devices_names: list[str], mpath: Optional[str] = None) -> dict[str, str]: device = '/dev/' + (mpath or devices_names[0]) result = {'type': 'block', 'scsi_wwn': wwn, 'path': device} if mpath: result['multipath_id'] = wwn return result @utils.retry((exception.VolumeDeviceNotFound)) def _connect_single_volume( self, connection_properties: dict) -> Optional[dict[str, str]]: """Connect to a volume using a single path.""" data: dict[str, Any] = {'stop_connecting': False, 'num_logins': 0, 'failed_logins': 0, 'stopped_threads': 0, 'found_devices': [], 'just_added_devices': []} for props in self._iterate_all_targets(connection_properties): self._connect_vol(self.device_scan_attempts, props, data) found_devs = data['found_devices'] if found_devs: for __ in range(10): wwn = self._linuxscsi.get_sysfs_wwn(found_devs) if wwn: break time.sleep(1) else: LOG.debug('Could not find the WWN for %s.', found_devs[0]) return self._get_connect_result(connection_properties, wwn, found_devs) # If we failed we must cleanup the connection, as we could be # leaving the node entry if it's not being used by another device. ips_iqns_luns = [(props['target_portal'], props['target_iqn'], props['target_lun']), ] self._cleanup_connection(props, ips_iqns_luns, force=True, ignore_errors=True) # Reset connection result values for next try data.update(num_logins=0, failed_logins=0, found_devices=[]) raise exception.VolumeDeviceNotFound(device='') def _connect_vol(self, rescans: int, props: dict, data: dict[str, Any]) -> None: """Make a connection to a volume, send scans and wait for the device. 
This method is specifically designed to support multithreading and share the results via a shared dictionary with fixed keys, which is thread safe. Since the heaviest operations are run via subprocesses we don't worry too much about the GIL or how the eventlets will handle the context switching. The method will only try to log in once, since iscsid's initiator already tries 8 times by default to do the login, or whatever value we have as node.session.initial_login_retry_max in our system. Shared dictionary has the following keys: - stop_connecting: When the caller wants us to stop the rescans - num_logins: Count of how many threads have successfully logged in - failed_logins: Count of how many threads have failed to log in - stopped_threads: How many threads have finished. This may be different than num_logins + failed_logins, since some threads may still be waiting for a device. - found_devices: List of devices the connections have found - just_added_devices: Devices that have been found and still have not been processed by the main thread that manages all the connecting threads. :param rescans: Number of rescans to perform before giving up. :param props: Properties of the connection. :param data: Shared data. """ device = hctl = None portal = props['target_portal'] try: session, manual_scan = self._connect_to_iscsi_portal(props) except Exception: LOG.exception('Exception connecting to %s', portal) session = None if session: do_scans = rescans > 0 or manual_scan # Scan is sent on connect by iscsid, but we must do it manually on # manual scan mode. This scan cannot count towards total rescans. if manual_scan: num_rescans = -1 seconds_next_scan = 0 else: num_rescans = 0 seconds_next_scan = 4 data['num_logins'] += 1 LOG.debug('Connected to %s', portal) while do_scans: try: if not hctl: hctl = self._linuxscsi.get_hctl(session, props['target_lun']) if hctl: if seconds_next_scan <= 0: num_rescans += 1 self._linuxscsi.scan_iscsi(*hctl) # 4 seconds on 1st rescan, 9s on 2nd, 16s on 3rd seconds_next_scan = (num_rescans + 2) ** 2 device = self._linuxscsi.device_name_by_hctl(session, hctl) if device: break except Exception: LOG.exception('Exception scanning %s', portal) pass do_scans = (num_rescans <= rescans and not (device or data['stop_connecting'])) if do_scans: time.sleep(1) seconds_next_scan -= 1 if device: LOG.debug('Connected to %s using %s', device, strutils.mask_password(props)) else: LOG.warning('LUN %(lun)s on iSCSI portal %(portal)s not found ' 'on sysfs after logging in.', {'lun': props['target_lun'], 'portal': portal}) else: LOG.warning('Failed to connect to iSCSI portal %s.', portal) data['failed_logins'] += 1 if device: data['found_devices'].append(device) data['just_added_devices'].append(device) data['stopped_threads'] += 1 @utils.retry((exception.VolumeDeviceNotFound)) def _connect_multipath_volume( self, connection_properties: dict) -> Optional[dict[str, str]]: """Connect to a multipathed volume launching parallel login requests. We will be doing parallel login requests, which will considerably speed up the process when we have flaky connections. We'll always try to return a multipath device even if there's only one path discovered, that way we can return once we have logged in in all the portals, because the paths will come up later. 
To make this possible we tell multipathd that the wwid is a multipath as soon as we have one device, and then hint multipathd to reconsider that volume for a multipath asking to add the path, because even if it's already known by multipathd it would have been discarded if it was the first time this volume was seen here. """ wwn: Optional[str] = None mpath = None wwn_added = False last_try_on = 0.0 found: list = [] just_added_devices: list = [] # Dict used to communicate with threads as detailed in _connect_vol data: ConnData = {'stop_connecting': False, 'num_logins': 0, 'failed_logins': 0, 'stopped_threads': 0, 'found_devices': found, 'just_added_devices': just_added_devices} ips_iqns_luns = self._get_ips_iqns_luns(connection_properties) # Launch individual threads for each session with the own properties retries = self.device_scan_attempts threads = [] for ip, iqn, lun in ips_iqns_luns: props = connection_properties.copy() props.update(target_portal=ip, target_iqn=iqn, target_lun=lun) # NOTE(yenai): The method _connect_vol is used for parallelize # logins, we shouldn't give these arguments; and it will make a # mess in the debug message in _connect_vol. So, kick them out: for key in ('target_portals', 'target_iqns', 'target_luns'): props.pop(key, None) threads.append(executor.Thread(target=self._connect_vol, args=(retries, props, data))) for thread in threads: thread.start() # Continue until: # - All connection attempts have finished and none has logged in # - Multipath has been found and connection attempts have either # finished or have already logged in # - We have finished in all threads, logged in, found some device, and # 10 seconds have passed, which should be enough with up to 10% # network package drops. while not ((len(ips_iqns_luns) == data['stopped_threads'] and not found) or (mpath and len(ips_iqns_luns) == data['num_logins'] + data['failed_logins'])): # We have devices but we don't know the wwn yet if not wwn and found: wwn = self._linuxscsi.get_sysfs_wwn(found, mpath) if not mpath and found: mpath = self._linuxscsi.find_sysfs_multipath_dm(found) # We have the wwn but not a multipath if wwn and not (mpath or wwn_added): # Tell multipathd that this wwn is a multipath and hint # multipathd to recheck all the devices we have just # connected. We only do this once, since for any new # device multipathd will already know it is a multipath. # This is only useful if we have multipathd configured with # find_multipaths set to yes, and has no effect if it's set # to no. wwn_added = self._linuxscsi.multipath_add_wwid(wwn) while not mpath and just_added_devices: device_path = '/dev/' + just_added_devices.pop(0) self._linuxscsi.multipath_add_path(device_path) mpath = self._linuxscsi.find_sysfs_multipath_dm(found) # Give some extra time after all threads have finished. if (not last_try_on and found and len(ips_iqns_luns) == data['stopped_threads']): LOG.debug('All connection threads finished, giving 10 seconds ' 'for dm to appear.') last_try_on = time.time() + 10 elif last_try_on and last_try_on < time.time(): break time.sleep(1) data['stop_connecting'] = True for thread in threads: thread.join() # If we haven't found any devices let the caller do the cleanup if not found: raise exception.VolumeDeviceNotFound(device='') # NOTE(geguileo): If we cannot find the dm it's because all paths are # really bad, so we might as well raise a not found exception, but # in our best effort we'll return a device even if it's probably # useless. 
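        # (Illustrative) the best-effort result is then
        # {'type': 'block', 'scsi_wwn': <wwn>, 'path': '/dev/sdX'}, whereas
        # the multipath case returns a '/dev/dm-N' path plus 'multipath_id'.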
if not mpath: LOG.warning('No dm was created, connection to volume is probably ' 'bad and will perform poorly.') else: # Wait for multipath device to be ready for I/O self._linuxscsi.wait_for_mpath_device(mpath) if not wwn: wwn = self._linuxscsi.get_sysfs_wwn(found, mpath) assert wwn is not None return self._get_connect_result(connection_properties, wwn, found, mpath) def _get_connection_devices( self, connection_properties: dict, ips_iqns_luns: Optional[list[tuple[str, str, str]]] = None, is_disconnect_call: bool = False) -> dict[set, set]: """Get map of devices by sessions from our connection. For each of the TCP sessions that correspond to our connection properties we generate a map of (ip, iqn) to (belong, other) where belong is a set of devices in that session that populated our system when we did a connection using connection properties, and other are any other devices that share that same session but are the result of connecting with different connection properties. We also include all nodes from our connection that don't have a session. If ips_iqns_luns parameter is provided connection_properties won't be used to get them. When doing multipath we may not have all the information on the connection properties (sendtargets was used on connect) so we may have to retrieve the info from the discoverydb. Call _get_ips_iqns_luns to do the right things. This method currently assumes that it's only called by the _cleanup_conection method. """ if not ips_iqns_luns: # This is a cleanup, don't do discovery ips_iqns_luns = self._get_ips_iqns_luns( connection_properties, discover=False, is_disconnect_call=is_disconnect_call) LOG.debug('Getting connected devices for (ips,iqns,luns)=%s', ips_iqns_luns) nodes = self._get_iscsi_nodes() sessions = self._get_iscsi_sessions_full() # Use (portal, iqn) to map the session value sessions_map = {(s[2], s[4]): s[1] for s in sessions if s[0] in self.VALID_SESSIONS_PREFIX} # device_map will keep a tuple with devices from the connection and # others that don't belong to this connection" (belong, others) device_map: defaultdict = defaultdict(lambda: (set(), set())) for ip, iqn, lun in ips_iqns_luns: session = sessions_map.get((ip, iqn)) # Our nodes that don't have a session will be returned as empty if not session: if (ip, iqn) in nodes: device_map[(ip, iqn)] = (set(), set()) continue # Get all devices for the session paths = glob.glob('/sys/class/scsi_host/host*/device/session' + session + '/target*/*:*:*:*/block/*') belong, others = device_map[(ip, iqn)] for path in paths: __, hctl, __, device = path.rsplit('/', 3) lun_path = int(hctl.rsplit(':', 1)[-1]) # For partitions turn them into the whole device: sde1 -> sde device = device.strip('0123456789') if lun_path == lun: belong.add(device) else: others.add(device) LOG.debug('Resulting device map %s', device_map) return device_map @utils.trace @base.synchronized('connect_volume', external=True) @utils.connect_volume_undo_prepare_result(unlink_after=True) def disconnect_volume(self, connection_properties: dict, device_info: dict, force: bool = False, ignore_errors: bool = False) -> None: """Detach the volume from instance_name. :param connection_properties: The dictionary that describes all of the target volume attributes. 
:type connection_properties: dict that must include: target_portal(s) - IP and optional port target_iqn(s) - iSCSI Qualified Name target_lun(s) - LUN id of the volume :param device_info: historical difference, but same as connection_props :type device_info: dict :param force: Whether to forcefully disconnect even if flush fails. :type force: bool :param ignore_errors: When force is True, this will decide whether to ignore errors or raise an exception once finished the operation. Default is False. :type ignore_errors: bool """ return self._cleanup_connection(connection_properties, force=force, ignore_errors=ignore_errors, device_info=device_info, is_disconnect_call=True) def _cleanup_connection( self, connection_properties: dict, ips_iqns_luns: Optional[list[tuple[Any, Any, Any]]] = None, force: bool = False, ignore_errors: bool = False, device_info: Optional[dict] = None, is_disconnect_call: bool = False) -> None: """Cleans up connection flushing and removing devices and multipath. :param connection_properties: The dictionary that describes all of the target volume attributes. :type connection_properties: dict that must include: target_portal(s) - IP and optional port target_iqn(s) - iSCSI Qualified Name target_lun(s) - LUN id of the volume :param ips_iqns_luns: Use this list of tuples instead of information from the connection_properties. :param force: Whether to forcefully disconnect even if flush fails. :type force: bool :param ignore_errors: When force is True, this will decide whether to ignore errors or raise an exception once finished the operation. Default is False. :param device_info: Attached device information. :param is_disconnect_call: Whether this is a call coming from a user disconnect_volume call or a call from some other operation's cleanup. :type is_disconnect_call: bool :type ignore_errors: bool """ exc = exception.ExceptionChainer() try: devices_map = self._get_connection_devices(connection_properties, ips_iqns_luns, is_disconnect_call) except exception.TargetPortalNotFound as target_exc: # When discovery sendtargets failed on connect there is no # information in the discoverydb, so there's nothing to clean. LOG.debug('Skipping cleanup %s', target_exc) return # Remove devices and multipath from this connection remove_devices = set() for remove, __ in devices_map.values(): remove_devices.update(remove) path_used = utils.get_dev_path(connection_properties, device_info) was_multipath = (path_used.startswith('/dev/dm-') or 'mpath' in path_used) multipath_name = self._linuxscsi.remove_connection( remove_devices, force, exc, path_used, was_multipath) # Disconnect sessions and remove nodes that are left without devices disconnect = [conn for conn, (__, keep) in devices_map.items() if not keep] self._disconnect_connection(connection_properties, disconnect, force, exc) # If flushing the multipath failed before, remove the multipath device # map from multipathd monitoring (only reaches here with multipath_name # if force=True). if multipath_name: LOG.debug('Removing multipath device map %s to stop multipathd ' 'from monitoring the device.', multipath_name) # We are passing sysfs mpath name here (dm-*) which is used # in the multipath_del_map method to fetch the DM name (multipath # device name i.e. SCSI WWID if user friendly names are OFF else # the configured user friendly name). 
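            # e.g. (hypothetical) multipath_name='dm-3' is resolved by
            # multipath_del_map to the DM name, such as '36005076...' when
            # user friendly names are off.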
with exc.context(force, 'Deleting map %s failed', multipath_name): self._linuxscsi.multipath_del_map(multipath_name) if exc: LOG.warning('There were errors removing %s, leftovers may remain ' 'in the system', remove_devices) if not ignore_errors: raise exc def _munge_portal( self, target: tuple[str, str, Union[list, str, int]]) -> \ tuple[str, str, list | str | int]: """Remove brackets from portal. In case IPv6 address was used the udev path should not contain any brackets. Udev code specifically forbids that. """ portal, iqn, lun = target return (portal.replace('[', '').replace(']', ''), iqn, self._linuxscsi.process_lun_id(lun)) def _get_device_path(self, connection_properties: dict) -> list: if self._get_transport() == "default": return ["/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s" % self._munge_portal(x) for x in self._get_all_targets(connection_properties)] else: # we are looking for paths in the format : # /dev/disk/by-path/ # pci-XXXX:XX:XX.X-ip-PORTAL:PORT-iscsi-IQN-lun-LUN_ID device_list = [] for x in self._get_all_targets(connection_properties): look_for_device = glob.glob( '/dev/disk/by-path/*ip-%s-iscsi-%s-lun-%s' % self._munge_portal(x)) if look_for_device: device_list.extend(look_for_device) return device_list def get_initiator(self) -> Optional[str]: """Secure helper to read file as root.""" file_path = '/etc/iscsi/initiatorname.iscsi' try: lines, _err = self._execute('cat', file_path, run_as_root=True, root_helper=self._root_helper) for line in lines.split('\n'): if line.startswith('InitiatorName='): return line[line.index('=') + 1:].strip() except putils.ProcessExecutionError: LOG.warning("Could not find the iSCSI Initiator File %s", file_path) return None return None def _run_iscsiadm(self, connection_properties: dict, iscsi_command: tuple[str, ...], **kwargs) -> tuple[str, str]: check_exit_code = kwargs.pop('check_exit_code', 0) attempts = kwargs.pop('attempts', 1) delay_on_retry = kwargs.pop('delay_on_retry', True) (out, err) = self._execute('iscsiadm', '-m', 'node', '-T', connection_properties['target_iqn'], '-p', connection_properties['target_portal'], *iscsi_command, run_as_root=True, root_helper=self._root_helper, check_exit_code=check_exit_code, attempts=attempts, delay_on_retry=delay_on_retry) msg = ("iscsiadm %(iscsi_command)s: stdout=%(out)s stderr=%(err)s" % {'iscsi_command': iscsi_command, 'out': out, 'err': err}) # don't let passwords be shown in log output LOG.debug(strutils.mask_password(msg)) return (out, err) def _iscsiadm_update(self, connection_properties: dict, property_key: str, property_value, **kwargs) -> tuple[str, str]: iscsi_command = ('--op', 'update', '-n', property_key, '-v', property_value) return self._run_iscsiadm(connection_properties, iscsi_command, **kwargs) def _get_target_portals_from_iscsiadm_output( self, output: str) -> tuple[list[str], list[str]]: # return both portals and iqns as 2 lists # # as we are parsing a command line utility, allow for the # possibility that additional debug data is spewed in the # stream, and only grab actual ip / iqn lines. 
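        # Example (hypothetical values): the line
        #     '192.168.1.10:3260,1 iqn.2004-04.com.example:tgt1'
        # contributes '192.168.1.10:3260' to ips and the IQN to iqns.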
ips = [] iqns = [] for data in [line.split() for line in output.splitlines()]: if len(data) == 2 and data[1].startswith('iqn.'): ips.append(data[0].split(',')[0]) iqns.append(data[1]) return ips, iqns def _connect_to_iscsi_portal( self, connection_properties: dict) -> tuple[Optional[str], Optional[bool]]: """Safely connect to iSCSI portal-target and return the session id.""" portal = connection_properties['target_portal'].split(",")[0] target_iqn = connection_properties['target_iqn'] lock_name = f'connect_to_iscsi_portal-{portal}-{target_iqn}' method = base.synchronized( lock_name, external=True)(self._connect_to_iscsi_portal_unsafe) return method(connection_properties) @utils.retry((exception.BrickException)) def _connect_to_iscsi_portal_unsafe( self, connection_properties: dict) -> tuple[Optional[str], Optional[bool]]: """Connect to an iSCSI portal-target an return the session id.""" portal = connection_properties['target_portal'].split(",")[0] target_iqn = connection_properties['target_iqn'] # NOTE(vish): If we are on the same host as nova volume, the # discovery makes the target so we don't need to # run --op new. Therefore, we check to see if the # target exists, and if we get 255 (Not Found), then # we run --op new. This will also happen if another # volume is using the same target. # iscsiadm returns 21 for "No records found" after version 2.0-871 LOG.info("Trying to connect to iSCSI portal %s", portal) out, err = self._run_iscsiadm(connection_properties, (), check_exit_code=(0, 21, 255)) if err: out_new, err_new = self._run_iscsiadm(connection_properties, ('--interface', self._get_transport(), '--op', 'new'), check_exit_code=(0, 6)) if err_new: # retry if iscsiadm returns 6 for "database failure" LOG.debug("Retrying to connect to iSCSI portal %s", portal) msg = (_("Encountered database failure for %s.") % (portal)) raise exception.BrickException(msg=msg) # Try to set the scan mode to manual res = self._iscsiadm_update(connection_properties, 'node.session.scan', 'manual', check_exit_code=False) manual_scan = not res[1] # Update global indicator of manual scan support used for # shared_targets locking so we support upgrading open iscsi to a # version supporting the manual scan feature without restarting Nova # or Cinder. initiator_utils.ISCSI_SUPPORTS_MANUAL_SCAN = manual_scan if connection_properties.get('auth_method'): self._iscsiadm_update(connection_properties, "node.session.auth.authmethod", connection_properties['auth_method']) self._iscsiadm_update(connection_properties, "node.session.auth.username", connection_properties['auth_username']) self._iscsiadm_update(connection_properties, "node.session.auth.password", connection_properties['auth_password']) # We exit once we are logged in or once we fail login while True: # Duplicate logins crash iscsiadm after load, so we scan active # sessions to see if the node is logged in. sessions = self._get_iscsi_sessions_full() for s in sessions: # Found our session, return session_id if (s[0] in self.VALID_SESSIONS_PREFIX and portal.lower() == s[2].lower() and s[4] == target_iqn): return str(s[1]), manual_scan try: # exit_code=15 means the session already exists, so it should # be regarded as successful login. 
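                # Roughly equivalent to running (illustrative):
                #     iscsiadm -m node -T <target_iqn> -p <portal> --login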
self._run_iscsiadm(connection_properties, ("--login",), check_exit_code=(0, 15, 255)) except putils.ProcessExecutionError as p_err: LOG.warning('Failed to login iSCSI target %(iqn)s on portal ' '%(portal)s (exit code %(err)s).', {'iqn': target_iqn, 'portal': portal, 'err': p_err.exit_code}) return None, None self._iscsiadm_update(connection_properties, "node.startup", "automatic") def _disconnect_from_iscsi_portal(self, connection_properties: dict) -> None: self._iscsiadm_update(connection_properties, "node.startup", "manual", check_exit_code=[0, 21, 255]) self._run_iscsiadm(connection_properties, ("--logout",), check_exit_code=[0, 21, 255]) self._run_iscsiadm(connection_properties, ('--op', 'delete'), check_exit_code=[0, 21, 255], attempts=5, delay_on_retry=True) def _disconnect_connection(self, connection_properties: dict, connections: Iterable, force: bool, exc) -> None: LOG.debug('Disconnecting from: %s', connections) props = connection_properties.copy() for ip, iqn in connections: props['target_portal'] = ip props['target_iqn'] = iqn with exc.context(force, 'Disconnect from %s %s failed', ip, iqn): self._disconnect_from_iscsi_portal(props) def _run_iscsi_session(self) -> tuple[str, str]: (out, err) = self._run_iscsiadm_bare(('-m', 'session'), check_exit_code=[0, 21, 255]) LOG.debug("iscsi session list stdout=%(out)s stderr=%(err)s", {'out': out, 'err': err}) return (out, err) def _run_iscsiadm_bare(self, iscsi_command, **kwargs) -> tuple[str, str]: check_exit_code = kwargs.pop('check_exit_code', 0) (out, err) = self._execute('iscsiadm', *iscsi_command, run_as_root=True, root_helper=self._root_helper, check_exit_code=check_exit_code) LOG.debug("iscsiadm %(iscsi_command)s: stdout=%(out)s stderr=%(err)s", {'iscsi_command': iscsi_command, 'out': out, 'err': err}) return (out, err) def _run_multipath(self, multipath_command, **kwargs): check_exit_code = kwargs.pop('check_exit_code', 0) (out, err) = self._execute('multipath', *multipath_command, run_as_root=True, root_helper=self._root_helper, check_exit_code=check_exit_code) LOG.debug("multipath %(multipath_command)s: " "stdout=%(out)s stderr=%(err)s", {'multipath_command': multipath_command, 'out': out, 'err': err}) return (out, err) def _get_node_startup_values(self, connection_properties): # Exit code 21 (ISCSI_ERR_NO_OBJS_FOUND) occurs when no nodes # exist - must consider this an empty (successful) result. out, __ = self._run_iscsiadm_bare( ['-m', 'node', '--op', 'show', '-p', connection_properties['target_portal']], check_exit_code=(0, 21)) or "" node_values_str = out.strip() node_values = node_values_str.split("\n") iqn = None startup = None startup_values = {} for node_value in node_values: node_keys = node_value.split() try: if node_keys[0] == "node.name": iqn = node_keys[2] elif node_keys[0] == "node.startup": startup = node_keys[2] if iqn and startup: startup_values[iqn] = startup iqn = None startup = None except IndexError: pass return startup_values def _recover_node_startup_values(self, connection_properties, old_node_startups): node_startups = self._get_node_startup_values(connection_properties) for iqn, node_startup in node_startups.items(): old_node_startup = old_node_startups.get(iqn, None) if old_node_startup and node_startup != old_node_startup: # _iscsiadm_update() only uses "target_portal" and "target_iqn" # of connection_properties. # And the recovering target belongs to the same target_portal # as discovering target. # So target_iqn is updated, and other values aren't updated. 
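                # e.g. (hypothetical) if discovery reset node.startup for
                # this iqn from 'manual' to 'automatic', this writes
                # 'manual' back.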
recover_connection = copy.deepcopy(connection_properties) recover_connection['target_iqn'] = iqn self._iscsiadm_update(recover_connection, "node.startup", old_node_startup) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/lightos.py0000664000175000017500000003407200000000000023420 0ustar00zuulzuul00000000000000# Copyright (C) 2016-2022 Lightbits Labs Ltd. # Copyright (C) 2020 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glob import http.client import os import re import tempfile import time import traceback from oslo_concurrency import processutils as putils from oslo_log import log as logging from oslo_utils import netutils import psutil from os_brick import exception from os_brick.i18n import _ from os_brick.initiator.connectors import base from os_brick.privileged import lightos as priv_lightos from os_brick import utils DEVICE_SCAN_ATTEMPTS_DEFAULT = 5 DISCOVERY_CLIENT_PORT = 6060 LOG = logging.getLogger(__name__) nvmec_pattern = ".*nvme[0-9]+[cp][0-9]+.*" nvmec_match = re.compile(nvmec_pattern) class LightOSConnector(base.BaseLinuxConnector): """Connector class to attach/detach LightOS volumes using NVMe/TCP.""" WAIT_DEVICE_TIMEOUT = 60 def __init__(self, root_helper, driver=None, execute=None, device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT, message_queue=None, *args, **kwargs): super(LightOSConnector, self).__init__( root_helper, driver=driver, execute=execute, device_scan_attempts=device_scan_attempts, *args, **kwargs) self.message_queue = message_queue self.DISCOVERY_DIR_PATH = '/etc/discovery-client/discovery.d/' @staticmethod def get_ip_addresses(): """Find all IPs for the host machine, return list of IP addresses.""" def get_ip_addresses_psutil(): ip_addresses = [] for interface, snics in psutil.net_if_addrs().items(): for snic in snics: # Collect each (interface, address) tuple ip_addresses.append((interface, snic.address)) return ip_addresses loop_back = ['lo'] ips = [] is_ipv6_enabled = netutils.is_ipv6_enabled() iface_with_ips = get_ip_addresses_psutil() for iface_ip_tuple in iface_with_ips: iface = iface_ip_tuple[0] ip = iface_ip_tuple[1] if iface in loop_back: continue if ip == "": continue if is_ipv6_enabled and netutils.is_valid_ipv6(ip): parts = ip.split("%") ip = parts[0] ips.append(ip) elif netutils.is_valid_ipv4(ip): ips.append(ip) return ips @staticmethod def get_connector_properties(root_helper, *args, **kwargs): """The LightOS connector properties.""" props = {} lightos_connector = LightOSConnector(root_helper=root_helper, message_queue=None, execute=kwargs.get('execute')) hostnqn = utils.get_host_nqn() found_dsc = lightos_connector.find_dsc() host_ips = lightos_connector.get_ip_addresses() LOG.info('Current host hostNQN %s and IP(s) are %s ', hostnqn, host_ips) if not found_dsc: LOG.debug('LIGHTOS: did not find dsc, continuing anyway.') if hostnqn: LOG.debug("LIGHTOS: finally hostnqn: %s dsc: %s", hostnqn, found_dsc) 
            props['nqn'] = hostnqn
            props['found_dsc'] = found_dsc
            props['host_ips'] = host_ips
        else:
            LOG.debug('LIGHTOS: no hostnqn found.')
        return props

    def dsc_file_name(self, uuid):
        return os.path.join(self.DISCOVERY_DIR_PATH, "%s.conf" % uuid)

    def find_dsc(self):
        conn = http.client.HTTPConnection("localhost", DISCOVERY_CLIENT_PORT)
        try:
            conn.request("HEAD", "/metrics")
            resp = conn.getresponse()
            return 'found' if resp.status == http.client.OK else ''
        except Exception as e:
            LOG.debug('LIGHTOS: %s', e)
            out = ''
        return out

    def dsc_need_connect(self, connection_info):
        return not os.path.isfile(self.dsc_file_name(connection_info['uuid']))

    def dsc_connect_volume(self, connection_info):
        if not self.dsc_need_connect(connection_info):
            return

        subsysnqn = connection_info['subsysnqn']
        uuid = connection_info['uuid']
        hostnqn = utils.get_host_nqn()
        with tempfile.NamedTemporaryFile(mode='w', delete=False) as dscfile:
            dscfile.write('# os_brick connector dsc file for LightOS'
                          ' volume: {}\n'.format(uuid))
            for (ip, node) in connection_info['lightos_nodes'].items():
                transport = node['transport_type']
                host = node['target_portal']
                port = node['target_port']
                dscfile.write('-t {} -a {} -s {} -q {} -n {}\n'.format(
                    transport, host, port, hostnqn, subsysnqn))
            dscfile.flush()
            try:
                dest_name = self.dsc_file_name(uuid)
                priv_lightos.move_dsc_file(dscfile.name, dest_name)
            except Exception:
                LOG.warning(
                    "LIGHTOS: Failed to create dsc file for connection with "
                    "uuid:%s", uuid)
                raise

    def dsc_disconnect_volume(self, connection_info):
        uuid = connection_info['uuid']
        try:
            priv_lightos.delete_dsc_file(self.dsc_file_name(uuid))
        except Exception:
            LOG.warning("LIGHTOS: Failed to delete dsc file uuid:%s", uuid)
            raise

    def monitor_db(self, lightos_db):
        for connection_info in lightos_db.values():
            self.dsc_connect_volume(connection_info)

    def monitor_message_queue(self, message_queue, lightos_db):
        while not message_queue.empty():
            msg = message_queue.get()
            op, connection = msg
            LOG.debug("LIGHTOS: queue got op: %s, connection: %s",
                      op, connection)
            if op == 'delete':
                LOG.info("LIGHTOS: Removing volume: %s from db",
                         connection['uuid'])
                if connection['uuid'] in lightos_db:
                    del lightos_db[connection['uuid']]
                else:
                    LOG.warning("LIGHTOS: No volume: %s found in db",
                                connection['uuid'])
            elif op == 'add':
                LOG.info("LIGHTOS: Adding volume: %s to db",
                         connection['uuid'])
                lightos_db[connection['uuid']] = connection

    def lightos_monitor(self, lightos_db, message_queue):
        '''Bookkeeping lightos connections.

        This is useful when the connector is coming up on a running node
        that already has connected volumes. This is used in the Nova
        driver to restore connections after a reboot.
        '''
        first_time = True
        while True:
            self.monitor_db(lightos_db)
            # give us some time before trying to access the MQ
            # for the first time
            if first_time:
                time.sleep(5)
                first_time = False
            else:
                time.sleep(1)

            self.monitor_message_queue(message_queue, lightos_db)

    # This is part of our abstract interface
    def get_search_path(self):
        return '/dev'

    # This is part of our abstract interface
    def get_volume_paths(self, connection_properties):
        path = connection_properties['device_path']
        return [path]

    def _check_device_exists_using_dev_lnk(self, uuid):
        lnk_path = f"/dev/disk/by-id/nvme-uuid.{uuid}"
        if os.path.exists(lnk_path):
            devname = os.path.realpath(lnk_path)
            if devname.startswith("/dev/nvme"):
                LOG.info("LIGHTOS: devpath %s detected for uuid %s",
                         devname, uuid)
                return devname
        return None

    def _check_device_exists_reading_block_class(self, uuid):
        file_path = "/sys/class/block/*/wwid"
        wwid = "uuid." + uuid
        for match_path in glob.glob(file_path):
            try:
                with open(match_path, "r") as f:
                    match_wwid = f.readline()
            except Exception:
                LOG.warning("LIGHTOS: Failed to read file %s",
                            match_path)
                continue

            if wwid != match_wwid.strip():
                continue

            # skip slave nvme devices, for example: nvme0c0n1
            if nvmec_match.match(match_path.split("/")[-2]):
                continue

            LOG.info("LIGHTOS: matching uuid %s was found"
                     " for device path %s", uuid, match_path)
            return os.path.join("/dev", match_path.split("/")[-2])
        return None

    @utils.trace
    def _get_device_by_uuid(self, uuid):
        endtime = time.time() + self.WAIT_DEVICE_TIMEOUT
        while time.time() < endtime:
            try:
                device = self._check_device_exists_using_dev_lnk(uuid)
                if device:
                    return device
            except Exception as e:
                LOG.debug('LIGHTOS: %s', e)
            device = self._check_device_exists_reading_block_class(uuid)
            if device:
                return device

            time.sleep(1)
        return None

    def _get_size_by_uuid(self, uuid):
        devpath = self._get_device_by_uuid(uuid)
        devname = devpath.split("/")[-1]
        try:
            size_path_name = os.path.join("/sys/class/block/", devname,
                                          "size")
            with open(size_path_name, "r") as f:
                size_blks = f.read().strip()
            bytesize = int(size_blks) * 512
            return bytesize
        except Exception:
            LOG.warning("LIGHTOS: Could not find the size for"
                        " uuid %s in %s", uuid, devpath)
            return None

    @utils.trace
    @utils.connect_volume_prepare_result
    @base.synchronized('volume_op')
    def connect_volume(self, connection_properties):
        """Discover and attach the volume.

        :param connection_properties: The dictionary that describes all
                                      of the target volume attributes.
               connection_properties must include:
               nqn - NVMe subsystem name to the volume to be connected
               target_port - NVMe target port that hosts the nqn subsystem
               target_portal - NVMe target IP that hosts the nqn subsystem
        :type connection_properties: dict
        :returns: dict
        """
        device_info = {'type': 'block'}
        uuid = connection_properties['uuid']
        LOG.info("LIGHTOS: connect_volume called for volume %s, connection"
                 " properties: %s", uuid, connection_properties)
        self.dsc_connect_volume(connection_properties)

        device_path = self._get_device_by_uuid(uuid)
        if not device_path:
            msg = _("Device with uuid %s did not show up") % uuid
            priv_lightos.delete_dsc_file(self.dsc_file_name(uuid))
            raise exception.BrickException(message=msg)
        device_info['path'] = device_path
        # bookkeeping lightos connections - add connection
        if self.message_queue:
            self.message_queue.put(('add', connection_properties))

        return device_info

    @utils.trace
    @base.synchronized('volume_op')
    @utils.connect_volume_undo_prepare_result(unlink_after=True)
    def disconnect_volume(self, connection_properties, device_info,
                          force=False, ignore_errors=False):
        """Disconnect a volume from the local host.

        The connection_properties are the same as from connect_volume.
        The device_info is returned from connect_volume.

        :param connection_properties: The dictionary that describes all
                                      of the target volume attributes.
        :type connection_properties: dict
        :param device_info: historical difference, but same as connection_props
        :type device_info: dict
        :param force: Whether to forcefully disconnect even if flush fails.
        :type force: bool
        :param ignore_errors: When force is True, this will decide whether to
                              ignore errors or raise an exception once finished
                              the operation.  Default is False.
        :type ignore_errors: bool
        """
        # bookkeeping lightos connections - delete connection
        if self.message_queue:
            self.message_queue.put(('delete', connection_properties))

        uuid = connection_properties['uuid']
        LOG.debug('LIGHTOS: disconnect_volume called for volume %s', uuid)
        device_path = self._get_device_by_uuid(uuid)
        exc = exception.ExceptionChainer()
        try:
            if device_path:
                self._linuxscsi.flush_device_io(device_path)
        except putils.ProcessExecutionError as e:
            exc.add_exception(type(e), e, traceback.format_exc())
            if not (force or ignore_errors):
                raise
        try:
            self.dsc_disconnect_volume(connection_properties)
        except Exception as e:
            exc.add_exception(type(e), e, traceback.format_exc())
        if exc:
            if not ignore_errors:
                raise exc

    @utils.trace
    @base.synchronized('volume_op')
    @utils.connect_volume_undo_prepare_result
    def extend_volume(self, connection_properties):
        uuid = connection_properties['uuid']
        new_size = self._get_size_by_uuid(uuid)
        return new_size
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0
os_brick-6.11.0/os_brick/initiator/connectors/local.py0000664000175000017500000000571200000000000023040 0ustar00zuulzuul00000000000000# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import annotations

from os_brick.i18n import _
from os_brick.initiator.connectors import base
from os_brick import utils


class LocalConnector(base.BaseLinuxConnector):
    """Connector class to attach/detach File System backed volumes."""

    def __init__(self, root_helper, driver=None,
                 *args, **kwargs):
        super(LocalConnector, self).__init__(root_helper, driver=driver,
                                             *args, **kwargs)  # type: ignore

    @staticmethod
    def get_connector_properties(root_helper: str, *args, **kwargs) -> dict:
        """The Local connector properties."""
        return {}

    def get_volume_paths(self, connection_properties: dict) -> list[str]:
        path = connection_properties['device_path']
        return [path]

    def get_search_path(self):
        return None

    def get_all_available_volumes(self, connection_properties=None):
        # TODO(walter-boring): not sure what to return here.
        return []

    @utils.trace
    def connect_volume(self, connection_properties: dict) -> dict:
        """Connect to a volume.

        :param connection_properties: The dictionary that describes all
                                      of the target volume attributes.
               ``connection_properties`` must include:

               - ``device_path`` - path to the volume to be connected

        :type connection_properties: dict
        :returns: dict
        """
        if 'device_path' not in connection_properties:
            msg = (_("Invalid connection_properties specified: "
                     "no device_path attribute"))
            raise ValueError(msg)

        device_info = {'type': 'local',
                       'path': connection_properties['device_path']}
        return device_info

    @utils.trace
    def disconnect_volume(self, connection_properties, device_info,
                          force=False, ignore_errors=False):
        """Disconnect a volume from the local host.

        :param connection_properties: The dictionary that describes all
                                      of the target volume attributes.
:type connection_properties: dict :param device_info: historical difference, but same as connection_props :type device_info: dict """ pass def extend_volume(self, connection_properties): # TODO(walter-boring): is this possible? raise NotImplementedError ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/nvmeof.py0000664000175000017500000016541400000000000023246 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Look in the NVMeOFConnProps class docstring to see the format of the NVMe-oF # connection properties from __future__ import annotations import errno import functools import glob import json import os.path import time from typing import (Callable, Optional, Sequence, Type, Union) import uuid as uuid_lib from oslo_concurrency import processutils as putils from oslo_log import log as logging from os_brick import exception from os_brick.i18n import _ from os_brick.initiator.connectors import base try: from os_brick.initiator.connectors import nvmeof_agent except ImportError: nvmeof_agent = None from os_brick.privileged import nvmeof as priv_nvmeof from os_brick.privileged import rootwrap as priv_rootwrap from os_brick import utils DEV_SEARCH_PATH = '/dev/' RAID_PATH = '/dev/md/' NVME_CTRL_SYSFS_PATH = '/sys/class/nvme-fabrics/ctl/' BLK_SYSFS_PATH = '/sys/class/block/' DEVICE_SCAN_ATTEMPTS_DEFAULT = 5 LOG = logging.getLogger(__name__) # ######################################################### # CONNECTION PROPERTIES KEYS start # Only present in the old connection info format OLD_NQN = 'nqn' TRANSPORT = 'transport_type' PORTAL = 'target_portal' PORT = 'target_port' # These were only present in the old connection info, but now we'll allow # both in the old format, the new format, and as part of a volume_replicas # element. 
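# For illustration, a hedged sketch of both formats (every value below is
# hypothetical, made up for this comment rather than taken from any real
# backend):
#
#   Old format:
#     {'nqn': 'nqn.2014-08.org.nvmexpress:uuid:<hypothetical>',
#      'transport_type': 'tcp',
#      'target_portal': '198.51.100.10',
#      'target_port': 4420}
#
#   New format (non replicated):
#     {'target_nqn': 'nqn.2014-08.org.nvmexpress:uuid:<hypothetical>',
#      'vol_uuid': 'ca93e9e8-35d9-4390-a452-ee2e537b6f35',  # hypothetical
#      'portals': [('198.51.100.10', 4420, 'tcp')]}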
NGUID = 'volume_nguid' NSID = 'ns_id' HOST_NQN = 'host_nqn' # Present in the new connection info format UUID = 'vol_uuid' NQN = 'target_nqn' PORTALS = 'portals' ALIAS = 'alias' REPLICAS = 'volume_replicas' REPLICA_COUNT = 'replica_count' # CONNECTION PROPERTIES KEYS end # ######################################################### # ######################################################### # UTILITY METHODS start def ctrl_property(prop_name: str, ctrl_name: str) -> Optional[str]: """Get a sysfs property of an nvme controller.""" return sysfs_property(prop_name, NVME_CTRL_SYSFS_PATH + ctrl_name) def blk_property(prop_name: str, blk_name: str) -> Optional[str]: """Get a sysfs property of a block device.""" return sysfs_property(prop_name, BLK_SYSFS_PATH + blk_name) def sysfs_property(prop_name: str, path_or_name: str) -> Optional[str]: """Get a sysfs property by path, returning None if not possible.""" filename = os.path.join(path_or_name, prop_name) LOG.debug('Checking property at %s', filename) try: with open(filename, 'r') as f: result = f.read().strip() LOG.debug('Contents: %s', result) return result except (FileNotFoundError, IOError) as exc: # May happen due to race conditions with device removals LOG.debug('Error reading file %s', exc) return None def nvme_basename(path: str) -> str: """Convert a sysfs control path into a namespace device. We can have basic namespace devices such as nvme0n10 which are already in the desired form, but there are also channels, when ANA is enabled on the kernel, which have the form nvme0c2n10 and need to be converted to nvme0n10 to get the actual device. """ basename = os.path.basename(path) if 'c' not in basename: # nvme0n10 return basename # nvme0c1n10 ==> nvme0n10 ctrl, rest = basename.split('c', 1) ns = rest[rest.index('n'):] return ctrl + ns # UTILITY METHODS end # ######################################################### # ######################################################### # AUXILIARY CLASSES start class Portal(object): """Representation of an NVMe-oF Portal with some related operations.""" LIVE = 'live' MISSING = None # Unknown or not present in the system CONNECTING = 'connecting' # Default value of reconnect_delay in sysfs DEFAULT_RECONNECT_DELAY = 10 controller: Optional[str] = None # Don't know controller name on start def __str__(self) -> str: return (f'Portal {self.transport} at {self.address}:{self.port} ' f'(ctrl: {self.controller})') __repr__ = __str__ def __init__(self, parent_target: 'Target', address: str, port: Union[str, int], transport: str) -> None: self.parent_target = parent_target self.address = address self.port = str(port) # Convert the transport into our internal representation if transport in ('RoCEv2', 'rdma'): self.transport = 'rdma' else: self.transport = 'tcp' @property def is_live(self) -> bool: """Check if the portal is live. Not being live can mean many things, such as still being in the 'connecting' state because the connection to the backend was lost, not knowing the controller name because we haven't searched for it, or not being connected to the backend.
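For example, a portal whose controller state file in sysfs reads 'connecting' is present on the system but not live, and a portal without a known controller name cannot report any state at all.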
""" LOG.debug('Checking if %s is live', self) return self.state == self.LIVE @property def state(self) -> Optional[str]: """Return the state if the controller is known, None otherwise.""" # Does not automatically search for the controller if self.controller: return ctrl_property('state', self.controller) return None @property def reconnect_delay(self) -> int: # 10 seconds is the default value of reconnect_delay if self.controller: res = ctrl_property('reconnect_delay', self.controller) if res is not None: return int(res) return self.DEFAULT_RECONNECT_DELAY def get_device(self) -> Optional[str]: """Get a device path using available volume identification markers. Priority is given to the uuid, since that must be unique across ALL devices, then nguid which is backend specific but a backend can reuse in other connections after it has been disconnected, and then using the namespace id (nsid) which changes based on the subsystem and the moment the volume is connected. If the target in the connection information didn't have any of those identifiers, then let the parent Target instance try to figure out the device based on the devices that existed when we started connecting and the ones available now. None is returned when a device cannot be found. """ target = self.parent_target if target.uuid: return self.get_device_by_property('uuid', target.uuid) if target.nguid: return self.get_device_by_property('nguid', target.nguid) if target.ns_id: return self.get_device_by_property('nsid', target.ns_id) # Fallback to using the target to do the search LOG.warning('Using unreliable mechanism to find device: ' '"devices_on_start"') return target.get_device_path_by_initial_devices() def get_all_namespaces_ctrl_paths(self) -> list[str]: """Return all nvme sysfs control paths for this portal. The basename of the path can be single volume or a channel to an ANA volume. For example for the nvme1 controller we could return: ['/sys/class/nvme-fabrics/ctl/nvme1n1 ', '/sys/class/nvme-fabrics/ctl/nvme0c1n1'] """ if not self.controller: return [] # Look under the controller, where we will have normal devices and ANA # channel devices. For nvme1 we could find nvme1n1 or nvme0c1n1) return glob.glob(f'{NVME_CTRL_SYSFS_PATH}{self.controller}/nvme*') def get_device_by_property(self, prop_name: str, value: str) -> Optional[str]: """Look for a specific device (namespace) within a controller. Use a specific property to identify the namespace within the controller and returns the device path under /dev. Returns None if device is not found. """ LOG.debug('Looking for device where %s=%s on controller %s', prop_name, value, self.controller) for path in self.get_all_namespaces_ctrl_paths(): prop_value = sysfs_property(prop_name, path) if prop_value == value: # Convert path to the namespace device name result = DEV_SEARCH_PATH + nvme_basename(path) LOG.debug('Device found at %s, using %s', path, result) return result LOG.debug('Block %s is not the one we are looking for (%s != %s)', path, prop_value, value) LOG.debug('No device Found on controller %s', self.controller) return None def can_disconnect(self) -> bool: """Check if this portal can be disconnected. A portal can be disconnected if it is connected (has a controller name) and the subsystem has no namespaces left or if it has only one and it is from this target. 
""" if not self.controller: LOG.debug('Portal %s is not present', self) return False ns_ctrl_paths = self.get_all_namespaces_ctrl_paths() num_namespaces = len(ns_ctrl_paths) # No namespaces => disconnect, >1 ns => can't disconnect if num_namespaces != 1: result = not bool(num_namespaces) LOG.debug('There are %s namespaces on %s so we %s disconnect', num_namespaces, self, 'can' if result else 'cannot') return result # With only 1 namespace, check if it belongs to the portal # Get the device on this target's portal (may be None) portal_dev = os.path.basename(self.get_device() or '') result = portal_dev == nvme_basename(ns_ctrl_paths[0]) LOG.debug("The only namespace on portal %s is %s and %s this target's", self, portal_dev, "matches" if result else "doesn't match") return result class Target(object): """Representation of an NVMe-oF Target and some related operations.""" # Only used if the target has no uuid, nguid, or ns_id information devices_on_start = None # Cache the device we find for cases where we do retries _device = None def __str__(self) -> str: return f'Target {self.nqn} at {self.portals}' __repr__ = __str__ @classmethod def factory(cls: Type['Target'], source_conn_props: 'NVMeOFConnProps', target_nqn: str, portals: list[str], vol_uuid: Optional[str] = None, volume_nguid: Optional[str] = None, ns_id: Optional[str] = None, host_nqn: Optional[str] = None, find_controllers=False, **ignore) -> 'Target': """Create an instance from the connection properties keys. Extra keyword arguments are accepted (and ignored) for convenience, so they don't need to be removed when calling the factory. """ target = cls(source_conn_props, target_nqn, portals, vol_uuid, volume_nguid, ns_id, host_nqn, find_controllers) return target def __init__(self, source_conn_props: 'NVMeOFConnProps', nqn: str, portals: list[str], uuid: Optional[str] = None, nguid: Optional[str] = None, ns_id: Optional[str] = None, host_nqn=None, find_controllers=False) -> None: """Initialize instance. Portals are converted from a list of length 3 tuple/list into a list of Portal instances. The find_controllers parameter controls the search of the controller names in the system for each of the portals. """ self.source_conn_props = source_conn_props self.nqn = nqn self.portals = [Portal(self, *portal) for portal in portals] self.uuid = uuid and str(uuid_lib.UUID(uuid)) self.nguid = nguid and str(uuid_lib.UUID(nguid)) self.ns_id = ns_id self.host_nqn = host_nqn if find_controllers: self.set_portals_controllers() # This only happens with some old connection properties format, where # we may not have a way to identify the new volume and we'll have to # try to guess it looking at existing volumes before the attach. if not (uuid or nguid or ns_id): self.devices_on_start = self._get_nvme_devices() LOG.debug('Devices on start are: %s', self.devices_on_start) @staticmethod def _get_nvme_devices() -> list[str]: """Get all NVMe devices present in the system.""" pattern = '/dev/nvme*n*' # e.g. /dev/nvme10n10 return glob.glob(pattern) @property def live_portals(self) -> list[Portal]: """Get live portals. Must have called set_portals_controllers first since portals without a controller name will be skipped. """ return [p for p in self.portals if p.is_live] @property def present_portals(self) -> list[Portal]: """Get present portals. Must have called set_portals_controllers first since portals without a controller name will be skipped. 
""" return [p for p in self.portals if p.state is not None] def set_portals_controllers(self) -> None: """Search and set controller names in the target's portals. Compare the address, port, and transport protocol for each portal against existing nvme subsystem controllers. """ if all(p.controller for p in self.portals): # all have been found return hostnqn: Optional[str] = self.host_nqn or utils.get_host_nqn() # List of portal addresses and transports for this target # Unlike "nvme list-subsys -o json" sysfs addr is separated by a comma sysfs_portals: list[tuple[Optional[str], Optional[str], Optional[Union[str, utils.Anything]], Optional[Union[str, utils.Anything]]]] = [ (p.address, p.port, p.transport, hostnqn) for p in self.portals ] known_names: list[str] = [p.controller for p in self.portals if p.controller] warned = False LOG.debug('Search controllers for portals %s', sysfs_portals) ctrl_paths = glob.glob(NVME_CTRL_SYSFS_PATH + 'nvme*') for ctrl_path in ctrl_paths: ctrl_name = os.path.basename(ctrl_path) if ctrl_name in known_names: continue LOG.debug('Checking controller %s', ctrl_name) nqn = sysfs_property('subsysnqn', ctrl_path) if nqn != self.nqn: LOG.debug("Skipping %s, doesn't match %s", nqn, self.nqn) continue # The right subsystem, but must also be the right portal ctrl_transport = sysfs_property('transport', ctrl_path) # Address in sysfs may contain src_addr in some systems. Parse and # only use destination addr and port address = sysfs_property('address', ctrl_path) if not address: LOG.error("Couldn't read address for %s", ctrl_path) continue ctrl_address = dict((x.split('=') for x in address.split(','))) ctrl_addr = ctrl_address['traddr'] ctrl_port = ctrl_address['trsvcid'] # hostnqn value not present in all OSs. Ignore when not present ctrl_hostnqn = sysfs_property('hostnqn', ctrl_path) or utils.ANY # Warn once per target for OS not presenting the hostnqn on sysfs if ctrl_hostnqn is utils.ANY and not warned: LOG.warning("OS doesn't present the host nqn information. " "Controller may be incorrectly matched.") warned = True ctrl_portal = (ctrl_addr, ctrl_port, ctrl_transport, ctrl_hostnqn) try: index = sysfs_portals.index(ctrl_portal) LOG.debug('Found a valid portal at %s', ctrl_portal) # Update the portal with the found controller name self.portals[index].controller = ctrl_name known_names.append(ctrl_name) # One more controller found except ValueError: # If it's not one of our controllers ignore it LOG.debug('Skipping %s, not part of portals %s', ctrl_portal, sysfs_portals) # short circuit if no more portals to find if len(known_names) == len(sysfs_portals): return def get_devices(self, only_live=False, get_one=False) -> list[str]: """Return devices for this target Optionally only return devices from portals that are live and also optionally return on first device found. Returns an empty list when not found. 
""" LOG.debug('Looking for volume at %s', self.nqn) # Use a set because multiple portals can return the same device when # using ANA (even if we are not intentionally doing multipathing) result = set() portals = self.live_portals if only_live else self.present_portals for portal in portals: device = portal.get_device() if device: result.add(device) if get_one: break return list(result) # NOTE: Don't change to a property, as it would hide VolumeDeviceNotFound @utils.retry(exception.VolumeDeviceNotFound, retries=5) def find_device(self) -> str: """Search for a device that is on a live subsystem Must have called set_portals_controllers first since portals without a controller name will be skipped. Retries up to 5 times with exponential backoff to give time in case the subsystem is currently reconnecting. Raises VolumeDeviceNotFound when finally gives up trying. """ if not self._device: devices = self.get_devices(only_live=True, get_one=True) if not devices: raise exception.VolumeDeviceNotFound(device=self.nqn) self._device = devices[0] return self._device def get_device_path_by_initial_devices(self) -> Optional[str]: """Find target's device path from devices that were present before.""" ctrls = [p.controller for p in self.portals if p.controller] def discard(devices): """Discard devices that don't belong to our controllers.""" if not devices: return set() return set(dev for dev in devices if os.path.basename(dev).rsplit('n', 1)[0] in ctrls) current_devices = self._get_nvme_devices() LOG.debug('Initial devices: %s. Current devices %s. Controllers: %s', self.devices_on_start, current_devices, ctrls) devices = discard(current_devices) - discard(self.devices_on_start) if not devices: return None if len(devices) == 1: return devices.pop() # With multiple devices they must all have the same uuid if (len(devices) > 1 and 1 < len(set(blk_property('uuid', os.path.basename(d)) for d in devices))): msg = _('Too many different volumes found for %s') % ctrls LOG.error(msg) return None return devices.pop() # They are the same volume, return any of them class NVMeOFConnProps(object): """Internal representation of the NVMe-oF connection properties There is an old and a newer connection properties format, which result in 2 variants for replicated connections and 2 for non replicated: 1- New format with multiples replicas information 2- New format with single replica information 3- New format with no replica information 4- Original format Case #1 and #2 format: { 'vol_uuid': , 'alias': , 'volume_replicas': [ , ... ], 'replica_count': len(volume_replicas), } Where: cinder_volume_id ==> Cinder id, could be different from NVMe UUID. with/without hyphens, uppper/lower cased. target :== { 'target_nqn': , 'vol_uuid': , 'portals': [ , ... ], } nvme_volume_uuid ==> NVMe UUID. Can be different than cinder's id. 
With/without hyphens, uppper/lower cased portal ::= tuple/list( , , ) transport_type ::= 'RoCEv2' | # anything => tcp Case #3 format: ==> As defined in case #1 & #2 Case #4 format: { 'nqn': , 'transport_type': , 'target_portal': , 'target_port': , 'volume_nguid': , 'ns_id': , 'host_nqn': , } Where: transport_type ::= 'rdma' | 'tcp' volume_nguid ==> Optional, with/without hyphens, uppper/lower cased target_namespace_id ==> Optional connector_host_nqn> ==> Optional This class unifies the representation of all these in the following attributes: replica_count: None for non replicated alias: None for non replicated cinder_volume_id: None for non replicated is_replicated: True if replica count > 1, None if count = 1 else False targets: List of Target instances device_path: None if not present (it's set by Nova) In this unification case#4 is treated as case#3 where the vol_uuid is None and leaving all the additional information in the dictionary. This way non replicated cases are always handled in the same way and we have a common " definition for all cases: target :== { 'target_nqn': , 'vol_uuid': , 'portals': [ , ... ], 'volume_nguid': , 'ns_id': , 'host_nqn': , } new_portal ::= tuple/list( , , ) new_transport_type ::= 'rdma' | 'tcp' Portals change the transport_type to the internal representation where: 'RoCEv2' ==> 'rdma' ==> 'tcp' This means that the new connection format now accepts vol_uuid set to None, and accepts ns_id, volume_nguid, and host_nqn parameters, as described in the connect_volume docstring. """ RO = 'ro' RW = 'rw' replica_count = None cinder_volume_id: Optional[str] = None def __init__(self, conn_props: dict, find_controllers: bool = False) -> None: # Generic connection properties fields used by Nova self.qos_specs = conn_props.get('qos_specs') self.readonly = conn_props.get('access_mode', self.RW) == self.RO self.encrypted = conn_props.get('encrypted', False) self.cacheable = conn_props.get('cacheable', False) self.discard = conn_props.get('discard', False) self.enforce_multipath = conn_props.get('enforce_multipath', False) # old connection properties format if REPLICAS not in conn_props and NQN not in conn_props: LOG.debug('Converting old connection info to new format') conn_props[UUID] = None conn_props[NQN] = conn_props.pop(OLD_NQN) conn_props[PORTALS] = [(conn_props.pop(PORTAL), conn_props.pop(PORT), conn_props.pop(TRANSPORT))] # Leave other fields as they are: volume_nguid, ns_id, host_nqn # NVMe-oF specific fields below self.alias = conn_props.get(ALIAS) if REPLICAS in conn_props: self.replica_count = (conn_props[REPLICA_COUNT] or len(conn_props[REPLICAS])) self.is_replicated = True if self.replica_count > 1 else None targets = conn_props[REPLICAS] self.cinder_volume_id = str(uuid_lib.UUID(conn_props[UUID])) else: self.is_replicated = False targets = [conn_props] self.targets = [Target.factory(source_conn_props=self, find_controllers=find_controllers, **target) for target in targets] # Below fields may have been added by nova self.device_path = conn_props.get('device_path') def get_devices(self, only_live: bool = False) -> list[str]: """Get all device paths present in the system for all targets.""" result = [] for target in self.targets: result.extend(target.get_devices(only_live)) return result @classmethod def from_dictionary_parameter(cls: Type['NVMeOFConnProps'], func: Callable) -> Callable: """Decorator to convert connection properties dictionary. 
It converts the connection properties into a NVMeOFConnProps instance and finds the controller names for all portals present in the system. """ @functools.wraps(func) def wrapper(self, connection_properties, *args, **kwargs): conn_props = cls(connection_properties, find_controllers=True) return func(self, conn_props, *args, **kwargs) return wrapper # AUXILIARY CLASSES end # ######################################################### # ######################################################### # CONNECTOR CLASS start class NVMeOFConnector(base.BaseLinuxConnector): """Connector class to attach/detach NVMe-oF volumes.""" # Use a class attribute since a restart is needed to change it on the host native_multipath_supported = None # Time we think is more than reasonable to establish an NVMe-oF connection TIME_TO_CONNECT = 10 def __init__(self, root_helper: str, driver: Optional[base.host_driver.HostDriver] = None, use_multipath: bool = False, device_scan_attempts: int = DEVICE_SCAN_ATTEMPTS_DEFAULT, *args, **kwargs) -> None: super(NVMeOFConnector, self).__init__( root_helper, driver, device_scan_attemps=device_scan_attempts, *args, **kwargs) self.use_multipath = use_multipath self._set_native_multipath_supported() if self.use_multipath and not self.native_multipath_supported: LOG.warning('native multipath is not enabled') @staticmethod def get_search_path() -> str: """Necessary implementation for an os-brick connector.""" return DEV_SEARCH_PATH def get_volume_paths( self, connection_properties: NVMeOFConnProps, device_info: Optional[dict[str, str]] = None) -> list[str]: """Return paths where the volume is present.""" # Priority is on the value returned by connect_volume method if device_info and device_info.get('path'): return [device_info['path']] # Nova may add the path on the connection properties as a device_path device_path = connection_properties.device_path if device_path: return [device_path] # If we don't get the info on the connection properties it could be # problematic because we could have multiple devices and not know which # one we used. LOG.warning('We are being called without the path information!') # TODO: For raids it would be good to ensure they are actually there if connection_properties.is_replicated: if connection_properties.alias is None: raise exception.BrickException('Alias missing in connection ' 'info') return [RAID_PATH + connection_properties.alias] # TODO: Return live devices first? 
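# With no path information we must inspect the devices themselves: gather
# every device belonging to the targets and, when we don't even know if
# the volume is replicated, probe each one with _is_raid_device (blkid
# reporting 'linux_raid_member') to choose between the RAID path and the
# raw namespace devices.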
devices = connection_properties.get_devices() # If we are not sure if it's replicated or not, find out if connection_properties.is_replicated is None: if any(self._is_raid_device(dev) for dev in devices): if connection_properties.alias is None: raise exception.BrickException('Alias missing in ' 'connection info') return [RAID_PATH + connection_properties.alias] return devices # ####### Connector Properties methods ######## @classmethod def nvme_present(cls: type) -> bool: """Check if the nvme CLI is present.""" try: priv_rootwrap.custom_execute('nvme', 'version') return True except Exception as exc: if isinstance(exc, OSError) and exc.errno == errno.ENOENT: LOG.debug('nvme not present on system') else: LOG.warning('Unknown error when checking presence of nvme: %s', exc) return False @classmethod def get_connector_properties(cls, root_helper, *args, **kwargs) -> dict: """The NVMe-oF connector properties (initiator uuid and nqn.)""" execute = kwargs.get('execute') or priv_rootwrap.execute nvmf = NVMeOFConnector(root_helper=root_helper, execute=execute) ret = {} nqn = None hostid = None uuid = nvmf._get_host_uuid() suuid = priv_nvmeof.get_system_uuid() if cls.nvme_present(): nqn = utils.get_host_nqn(suuid) # Ensure /etc/nvme/hostid exists and defaults to the system uuid, # or a random value. hostid = utils.get_nvme_host_id(suuid) if hostid: ret['nvme_hostid'] = hostid if uuid: ret['uuid'] = uuid if suuid: ret['system uuid'] = suuid # compatibility if nqn: ret['nqn'] = nqn ret['nvme_native_multipath'] = cls._set_native_multipath_supported() return ret def _get_host_uuid(self) -> Optional[str]: """Get the UUID of the first mounted filesystem.""" cmd = ('findmnt', '-v', '/', '-n', '-o', 'SOURCE') try: lines, err = self._execute( *cmd, run_as_root=True, root_helper=self._root_helper) source = lines.split('\n')[0] # In a container this could be 'overlay', which causes the blkid # command to fail. 
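# Illustrative (hypothetical) run: 'findmnt -v / -n -o SOURCE' prints
# '/dev/sda1', and 'blkid /dev/sda1 -s UUID -o value' then prints the
# filesystem UUID that this method returns.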
if source == "overlay": return None blkid_cmd = ( 'blkid', source, '-s', 'UUID', '-o', 'value') lines, _err = self._execute( *blkid_cmd, run_as_root=True, root_helper=self._root_helper) return lines.split('\n')[0] except putils.ProcessExecutionError as e: LOG.warning( "Process execution error in _get_host_uuid: %s", e) return None @classmethod def _set_native_multipath_supported(cls): if cls.native_multipath_supported is None: cls.native_multipath_supported = \ cls._is_native_multipath_supported() return cls.native_multipath_supported @staticmethod def _is_native_multipath_supported(): try: with open('/sys/module/nvme_core/parameters/multipath', 'rt') as f: return f.read().strip() == 'Y' except Exception: LOG.warning("Could not find nvme_core/parameters/multipath") return False # ####### Connect Volume methods ######## def supports_multipath(self): return self.native_multipath_supported @utils.trace @utils.connect_volume_prepare_result @base.synchronized('connect_volume', external=True) @NVMeOFConnProps.from_dictionary_parameter def connect_volume( self, connection_properties: NVMeOFConnProps) -> dict[str, str]: """Attach and discover the volume.""" self.check_multipath( {'enforce_multipath': getattr( connection_properties, 'enforce_multipath', False)}) try: if connection_properties.is_replicated is False: LOG.debug('Starting non replicated connection') path = self._connect_target(connection_properties.targets[0]) else: # If we know it's replicated or we don't yet know LOG.debug('Starting replicated connection') path = self._connect_volume_replicated(connection_properties) except Exception: self._try_disconnect_all(connection_properties) raise return {'type': 'block', 'path': path} def _do_multipath(self): return self.use_multipath and self.native_multipath_supported @utils.retry(exception.VolumeDeviceNotFound, interval=2) def _connect_target(self, target: Target) -> str: """Attach a specific target to present a volume on the system If we are already connected to any of the portals (and it's live) we send a rescan (because the backend may not support AER messages), otherwise we iterate through the portals trying to do an nvme-of connection. This method assumes that the controllers for the portals have already been set. For example using the from_dictionary_parameter decorator in the NVMeOFConnProps class. Returns the path of the connected device. """ connected = False missing_portals = [] reconnecting_portals = [] for portal in target.portals: state = portal.state # store it so we read only once from sysfs # Rescan live controllers in case backend doesn't support AER if state == portal.LIVE: connected = True self.rescan(portal.controller) # type: ignore # Remember portals that are not present in the system elif state == portal.MISSING: missing_portals.append(portal) elif state == portal.CONNECTING: LOG.debug('%s is reconnecting', portal) reconnecting_portals.append(portal) # Ignore reconnecting/dead portals else: LOG.debug('%s exists but is %s', portal, state) # If no live portals exist or if we want to use multipath do_multipath = self._do_multipath() if do_multipath or not connected: for portal in missing_portals: cmd = ['connect', '-a', portal.address, '-s', portal.port, '-t', portal.transport, '-n', target.nqn, '-Q', '128', '-l', '-1'] if target.host_nqn: cmd.extend(['-q', target.host_nqn]) try: self.run_nvme_cli(cmd) connected = True except putils.ProcessExecutionError as exc: # In some nvme versions code 70 means target is already # connected, but in newer versions code is EALREADY. 
# Those should only happen if there is a race condition # because something is incorrectly configured (n-cpu and # c-vol running on same node with different lock paths) or # an admin is touching things manually. Not passing these # exit codes in check_exit_code parameter to _execute so we # can log it. nvme cli v2 returns 1, so we parse the # message. Some nvme cli versions return errors in stdout, # so we look in stderr and stdout. if not (exc.exit_code in (70, errno.EALREADY) or (exc.exit_code == 1 and 'already connected' in exc.stderr + exc.stdout)): LOG.error('Could not connect to %s: exit_code: %s, ' 'stdout: "%s", stderr: "%s",', portal, exc.exit_code, exc.stdout, exc.stderr) continue LOG.warning('Race condition with some other application ' 'when connecting to %s, please check your ' 'system configuration.', portal) state = portal.state if state == portal.LIVE: connected = True elif state == portal.CONNECTING: reconnecting_portals.append(portal) else: LOG.error('Ignoring %s due to unknown state (%s)', portal, state) if not do_multipath: break # We are connected if not connected and reconnecting_portals: delay = self.TIME_TO_CONNECT + max(p.reconnect_delay for p in reconnecting_portals) LOG.debug('Waiting %s seconds for some nvme controllers to ' 'reconnect', delay) timeout = time.time() + delay while time.time() < timeout: time.sleep(1) if any(p.is_live for p in reconnecting_portals): LOG.debug('Reconnected') connected = True break LOG.debug('No controller reconnected') if not connected: raise exception.VolumeDeviceNotFound(device=target.nqn) # Ensure controller names of new connections are set target.set_portals_controllers() dev_path = target.find_device() return dev_path @utils.trace def _connect_volume_replicated( self, connection_properties: NVMeOFConnProps) -> str: """Connect to a replicated volume and prepare the RAID Connection properties must contain all the necessary replica information, even if there is only 1 replica. Returns the /dev/md path Raises VolumeDeviceNotFound when cannot present the device in the system. 
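For example (hypothetical values), a volume with replica_count 3 and alias 'raid1alias' connects each replica's target and returns '/dev/md/raid1alias'.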
""" host_device_paths = [] if not connection_properties.alias: raise exception.BrickException('Alias missing in connection info') for replica in connection_properties.targets: try: rep_host_device_path = self._connect_target(replica) host_device_paths.append(rep_host_device_path) except Exception as ex: LOG.error("_connect_target: %s", ex) if not host_device_paths: raise exception.VolumeDeviceNotFound( device=connection_properties.targets) if connection_properties.is_replicated: device_path = self._handle_replicated_volume( host_device_paths, connection_properties) else: device_path = self._handle_single_replica( host_device_paths, connection_properties.alias) if nvmeof_agent: nvmeof_agent.NVMeOFAgent.ensure_running(self) return device_path def _handle_replicated_volume(self, host_device_paths: list[str], conn_props: NVMeOFConnProps) -> str: """Assemble the raid from found devices.""" path_in_raid = False for dev_path in host_device_paths: path_in_raid = self._is_device_in_raid(dev_path) if path_in_raid: break device_path = RAID_PATH + conn_props.alias # type: ignore if path_in_raid: self.stop_and_assemble_raid(host_device_paths, device_path, False) else: paths_found = len(host_device_paths) if conn_props.replica_count > paths_found: # type: ignore LOG.error( 'Cannot create MD as %s out of %s legs were found.', paths_found, conn_props.replica_count) raise exception.VolumeDeviceNotFound(device=conn_props.alias) self.create_raid(host_device_paths, '1', conn_props.alias, # type: ignore conn_props.alias, # type: ignore False) return device_path def _handle_single_replica(self, host_device_paths: list[str], volume_alias: str) -> str: """Assemble the raid from a single device.""" if self._is_raid_device(host_device_paths[0]): md_path = RAID_PATH + volume_alias self.stop_and_assemble_raid(host_device_paths, md_path, False) return md_path return host_device_paths[0] # ####### Disconnect methods ######## @utils.trace @base.synchronized('connect_volume', external=True) @utils.connect_volume_undo_prepare_result(unlink_after=True) def disconnect_volume(self, connection_properties: dict, device_info: dict[str, str], force: bool = False, ignore_errors: bool = False) -> None: """Flush the volume. Disconnect of volumes happens on storage system side. Here we could remove connections to subsystems if no volumes are left. But new volumes can pop up asynchronously in the meantime. So the only thing left is flushing or disassembly of a correspondng RAID device. :param connection_properties: The dictionary that describes all of the target volume attributes as described in connect_volume but also with the "device_path" key containing the path to the volume that was connected (this is added by Nova). :type connection_properties: dict :param device_info: historical difference, but same as connection_props :type device_info: dict """ # NOTE: Cannot use NVMeOFConnProps's decorator to create instance from # conn props because "connect_volume_undo_prepare_result" must be the # first decorator and it expects a dictionary. conn_props = NVMeOFConnProps(connection_properties) try: device_path = self.get_volume_paths(conn_props, device_info)[0] except IndexError: LOG.warning( "Cannot find the device for %s, assuming it's not there.", conn_props.cinder_volume_id or conn_props.targets[0].nqn) return exc = exception.ExceptionChainer() if not os.path.exists(device_path): LOG.warning("Trying to disconnect device %(device_path)s, but " "it is not connected. 
Skipping.", {'device_path': device_path}) return # We assume that raid devices are flushed when ending the raid if device_path.startswith(RAID_PATH): with exc.context(force, 'Failed to end raid %s', device_path): self.end_raid(device_path) else: with exc.context(force, 'Failed to flush %s', device_path): self._linuxscsi.flush_device_io(device_path) self._try_disconnect_all(conn_props, exc) if exc: LOG.warning('There were errors removing %s', device_path) if not ignore_errors: raise exc def _try_disconnect_all( self, conn_props: NVMeOFConnProps, exc: Optional[exception.ExceptionChainer] = None) -> None: """Disconnect all subsystems that are not being used. Only sees if it has to disconnect this connection properties portals, leaves other alone. Since this is unrelated to the flushing of the devices failures will be logged but they won't be raised. """ if exc is None: exc = exception.ExceptionChainer() for target in conn_props.targets: # Associate each portal with its controller name target.set_portals_controllers() for portal in target.portals: # Ignore exceptions to disconnect as many as possible. with exc.context(True, 'Failed to disconnect %s', portal): self._try_disconnect(portal) def _try_disconnect(self, portal: Portal) -> None: """Disconnect a specific subsystem if it's safe. Only disconnect if it has no namespaces left or has only one left and it is from this connection. """ LOG.debug('Checking if %s can be disconnected', portal) if portal.can_disconnect(): self._execute('nvme', 'disconnect', '-d', '/dev/' + portal.controller, # type: ignore root_helper=self._root_helper, run_as_root=True) # ####### Extend methods ######## @staticmethod def _get_sizes_from_lba(ns_data: dict) -> tuple[Optional[int], Optional[int]]: """Return size in bytes and the nsze of the volume from NVMe NS data. nsze is the namespace size that defines the total size of the namespace in logical blocks (LBA 0 through n-1), as per NVMe-oF specs. Returns a tuple of nsze and size """ try: lbads = ns_data['lbafs'][0]['ds'] # Don't know what to do with more than 1 LBA and as per NVMe specs # if LBADS < 9 then LBA is not supported if len(ns_data['lbafs']) != 1 or lbads < 9: LOG.warning("Cannot calculate new size with LBAs") return None, None nsze = ns_data['nsze'] new_size = nsze * (1 << lbads) except Exception: return None, None LOG.debug('New volume size is %s and nsze is %s', new_size, nsze) return nsze, new_size @utils.trace @base.synchronized('extend_volume', external=True) @utils.connect_volume_undo_prepare_result def extend_volume(self, connection_properties: dict[str, str]) -> int: """Update an attached volume to reflect the current size after extend The only way to reflect the new size of an NVMe-oF volume on the host is a rescan, which rescans the whole subsystem. This is a problem on attach_volume and detach_volume, but not here, since we will have at least the namespace we are operating on in the subsystem. The tricky part is knowing when a rescan has already been completed and the volume size on sysfs is final. The rescan may already have happened before this method is called due to an AER message or we may need to trigger it here. Scans can be triggered manually with 'nvme ns-rescan' or writing 1 in configf's rescan file, or they can be triggered indirectly when calling the 'nvme list', 'nvme id-ns', or even using the 'nvme admin-passthru' command. 
Even after getting the new size with any of the NVMe commands above, we still need to wait until this is reflected on the host device, because we cannot return to the caller until the new size is in effect. If we don't see the new size taking effect on the system after 5 seconds, or if we cannot get the new size with nvme, then we rescan in the latter and in both cases we blindly wait 5 seconds and return whatever size is present. For replicated volumes, the RAID needs to be extended. """ # NOTE: Cannot use NVMeOFConnProps's decorator to create instance from # conn props because "connect_volume_undo_prepare_result" must be the # first decorator and it expects a dictionary. conn_props = NVMeOFConnProps(connection_properties) try: device_path = self.get_volume_paths(conn_props)[0] except IndexError: raise exception.VolumeDeviceNotFound() # Replicated needs to grow the raid, even if there's only 1 device if device_path.startswith(RAID_PATH): # NOTE: May not work without backend AER support and may have races self.run_mdadm(('mdadm', '--grow', '--size', 'max', device_path)) else: dev_name = os.path.basename(device_path) ctrl_name = dev_name.rsplit('n', 1)[0] nsze: Optional[Union[str, int]] = None try: # With many devices, id-ns command generates less data out, err = self._execute('nvme', 'id-ns', '-ojson', device_path, run_as_root=True, root_helper=self._root_helper) ns_data = json.loads(out) nsze, new_size = self._get_sizes_from_lba(ns_data) except Exception as exc: LOG.warning('Failed to get id-ns %s', exc) # Assume that nvme command failed, so didn't scan self.rescan(ctrl_name) if nsze: # Wait for the system to reflect the new size nsze = str(nsze) # To compare with contents of sysfs for x in range(10): # Wait 5 secs for size to appear in sysfs current_nsze = blk_property('size', dev_name) if current_nsze == nsze: return new_size # type: ignore LOG.debug('Sysfs size is still %s', current_nsze) time.sleep(0.5) LOG.warning('Timeout waiting for sysfs to reflect the right ' 'volume size.') # Last resort when id-ns failed or system didn't reflect new size LOG.info('Wait 5 seconds and return whatever size is present') time.sleep(5) size = utils.get_device_size(self, device_path) if size is None: raise exception.BrickException( 'get_device_size returned non-numeric size') return size # ####### RAID methods ######## def run_mdadm(self, cmd: Sequence[str], raise_exception: bool = False) -> Optional[str]: cmd_output = None try: lines, err = self._execute( *cmd, run_as_root=True, root_helper=self._root_helper) for line in lines.split('\n'): cmd_output = line break except putils.ProcessExecutionError as ex: LOG.warning("[!] Could not run mdadm: %s", str(ex)) if raise_exception: raise ex return cmd_output def _is_device_in_raid(self, device_path: str) -> bool: cmd = ['mdadm', '--examine', device_path] raid_expected = device_path + ':' try: lines, err = self._execute( *cmd, run_as_root=True, root_helper=self._root_helper) for line in lines.split('\n'): if line == raid_expected: return True else: return False except putils.ProcessExecutionError: pass return False @staticmethod def ks_readlink(dest: str) -> str: try: return os.readlink(dest) except Exception: return '' @staticmethod def get_md_name(device_name: str) -> Optional[str]: try: with open('/proc/mdstat', 'r') as f: lines = [line.split(' ')[0] for line in f if device_name in line] if lines: return lines[0] except Exception as exc: LOG.debug("[!] 
Could not find md name for %s in mdstat: %s", device_name, exc) return None def stop_and_assemble_raid(self, drives: list[str], md_path: str, read_only: bool) -> None: md_name = None i = 0 assembled = False link = '' while i < 5 and not assembled: for drive in drives: device_name = drive[5:] md_name = self.get_md_name(device_name) link = NVMeOFConnector.ks_readlink(md_path) if link != '': link = os.path.basename(link) if md_name and md_name == link: return LOG.debug( "sleeping 1 sec -allow auto assemble link = %(link)s " "md path = %(md_path)s", {'link': link, 'md_path': md_path}) time.sleep(1) if md_name and md_name != link: self.stop_raid(md_name) try: assembled = self.assemble_raid(drives, md_path, read_only) except Exception: i += 1 def assemble_raid(self, drives: list[str], md_path: str, read_only: bool) -> bool: cmd = ['mdadm', '--assemble', '--run', md_path] if read_only: cmd.append('-o') for i in range(len(drives)): cmd.append(drives[i]) try: self.run_mdadm(cmd, True) except putils.ProcessExecutionError as ex: LOG.warning("[!] Could not _assemble_raid: %s", str(ex)) raise ex return True def create_raid(self, drives: list[str], raid_type: str, device_name: str, name: str, read_only: bool) -> None: cmd = ['mdadm'] num_drives = len(drives) cmd.append('-C') if read_only: cmd.append('-o') cmd.append(device_name) cmd.append('-R') if name: cmd.append('-N') cmd.append(name) cmd.append('--level') cmd.append(raid_type) cmd.append('--raid-devices=' + str(num_drives)) cmd.append('--bitmap=internal') cmd.append('--homehost=any') cmd.append('--failfast') cmd.append('--assume-clean') for i in range(len(drives)): cmd.append(drives[i]) LOG.debug('[!] cmd = %s', cmd) self.run_mdadm(cmd) # sometimes under load, md is not created right away so we wait for i in range(60): try: is_exist = os.path.exists(RAID_PATH + name) LOG.debug("[!] md is_exist = %s", is_exist) if is_exist: return time.sleep(1) except Exception: LOG.debug('[!] Exception_wait_raid!') msg = _("md: /dev/md/%s not found.") % name LOG.error(msg) raise exception.NotFound(message=msg) def end_raid(self, device_path: str) -> None: raid_exists = self.is_raid_exists(device_path) if raid_exists: for i in range(10): try: cmd_out = self.stop_raid(device_path, True) if not cmd_out: break except Exception: time.sleep(1) try: is_exist = os.path.exists(device_path) LOG.debug("[!] is_exist = %s", is_exist) if is_exist: self.remove_raid(device_path) os.remove(device_path) except Exception: LOG.debug('[!] Exception_stop_raid!') def stop_raid(self, md_path: str, raise_exception: bool = False) -> Optional[str]: cmd = ['mdadm', '--stop', md_path] LOG.debug("[!] cmd = %s", cmd) cmd_out = self.run_mdadm(cmd, raise_exception) return cmd_out def is_raid_exists(self, device_path: str) -> bool: cmd = ['mdadm', '--detail', device_path] LOG.debug("[!] cmd = %s", cmd) raid_expected = device_path + ':' try: lines, err = self._execute( *cmd, run_as_root=True, root_helper=self._root_helper) for line in lines.split('\n'): LOG.debug("[!] line = %s", line) if line == raid_expected: return True else: return False except putils.ProcessExecutionError: pass return False def remove_raid(self, device_path: str) -> None: cmd = ['mdadm', '--remove', device_path] LOG.debug("[!] cmd = %s", cmd) self.run_mdadm(cmd) def _is_raid_device(self, device: str) -> bool: return self._get_fs_type(device) == 'linux_raid_member' def _get_fs_type(self, device_path: str) -> Optional[str]: cmd = ['blkid', device_path, '-s', 'TYPE', '-o', 'value'] LOG.debug("[!] 
cmd = %s", cmd) fs_type = None # We don't care about errors, on error lines will be '' so it's ok lines, err = self._execute( *cmd, run_as_root=True, root_helper=self._root_helper, check_exit_code=False) fs_type = lines.split('\n')[0] return fs_type or None # ####### NVMe methods ######## def run_nvme_cli(self, nvme_command: Sequence[str], **kwargs) -> tuple[str, str]: """Run an nvme cli command and return stdout and stderr output.""" (out, err) = self._execute('nvme', *nvme_command, run_as_root=True, root_helper=self._root_helper, check_exit_code=True) msg = ("nvme %(nvme_command)s: stdout=%(out)s stderr=%(err)s" % {'nvme_command': nvme_command, 'out': out, 'err': err}) LOG.debug("[!] %s", msg) return out, err def rescan(self, controller_name: str) -> None: """Rescan an nvme controller.""" nvme_command = ('ns-rescan', DEV_SEARCH_PATH + controller_name) try: self.run_nvme_cli(nvme_command) except Exception as e: raise exception.CommandExecutionFailed(e, cmd=nvme_command) # CONNECTOR CLASS end # ######################################################### ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/rbd.py0000664000175000017500000004431500000000000022517 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import annotations import os import tempfile import typing from typing import Any, Optional, Union if typing.TYPE_CHECKING: import io from oslo_concurrency import processutils as putils from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import fileutils from os_brick import exception from os_brick.i18n import _ from os_brick import initiator from os_brick.initiator.connectors import base from os_brick.initiator.connectors import base_rbd from os_brick.initiator import linuxrbd from os_brick.privileged import rbd as rbd_privsep from os_brick import utils LOG = logging.getLogger(__name__) class RBDConnector(base_rbd.RBDConnectorMixin, base.BaseLinuxConnector): """"Connector class to attach/detach RBD volumes.""" def __init__(self, root_helper, driver=None, use_multipath=False, device_scan_attempts=initiator.DEVICE_SCAN_ATTEMPTS_DEFAULT, *args, **kwargs): super(RBDConnector, self).__init__(root_helper, driver=driver, # type: ignore device_scan_attempts= device_scan_attempts, *args, **kwargs) self.do_local_attach = kwargs.get('do_local_attach', False) @staticmethod def get_connector_properties(root_helper, *args, **kwargs): """The RBD connector properties.""" return {'do_local_attach': kwargs.get('do_local_attach', False)} def get_volume_paths(self, connection_properties: dict[str, Any]) -> list[str]: # TODO(e0ne): Implement this for local volume. return [] def get_search_path(self) -> None: # TODO(walter-boring): don't know where the connector # looks for RBD volumes. 
return None def get_all_available_volumes( self, connection_properties: Optional[dict[str, Any]] = None) -> \ list[str]: # TODO(e0ne): Implement this for local volume. return [] @staticmethod def _check_or_get_keyring_contents(keyring: Optional[str], cluster_name: str, user: str) -> str: try: if keyring is None: if user: keyring_path = ("/etc/ceph/%s.client.%s.keyring" % (cluster_name, user)) with open(keyring_path, 'r') as keyring_file: keyring = keyring_file.read() else: keyring = '' return keyring except IOError: msg = (_("Keyring path %s is not readable.") % (keyring_path)) raise exception.BrickException(msg=msg) @classmethod def _create_ceph_conf(cls, monitor_ips: list[str], monitor_ports: list[str], cluster_name: str, user: str, keyring) -> str: monitors = ["%s:%s" % (ip, port) for ip, port in zip(cls._sanitize_mon_hosts(monitor_ips), monitor_ports)] mon_hosts = "mon_host = %s" % (','.join(monitors)) keyring = cls._check_or_get_keyring_contents(keyring, cluster_name, user) try: fd, ceph_conf_path = tempfile.mkstemp(prefix="brickrbd_") with os.fdopen(fd, 'w') as conf_file: # Bug #1865754 - '[global]' has been the appropriate # place for this stuff since at least Hammer, but in # Octopus (15.2.0+), Ceph began enforcing this. conf_file.writelines(["[global]", "\n", mon_hosts, "\n", keyring, "\n"]) return ceph_conf_path except IOError: msg = (_("Failed to write data to %s.") % (ceph_conf_path)) raise exception.BrickException(msg=msg) def _get_rbd_handle(self, connection_properties: dict[str, Any]) -> \ linuxrbd.RBDVolumeIOWrapper: try: user = connection_properties['auth_username'] pool, volume = connection_properties['name'].split('/') cluster_name = connection_properties['cluster_name'] monitor_ips = connection_properties['hosts'] monitor_ports = connection_properties['ports'] # NOTE: cinder no longer passes keyring data in the connection # properties as of the victoria release. See OSSN-0085. But # cinderlib does, so we must keep the code related to the keyring. keyring = connection_properties.get('keyring') except (KeyError, ValueError): msg = _("Connect volume failed, malformed connection properties.") raise exception.BrickException(msg=msg) conf = self._create_ceph_conf(monitor_ips, monitor_ports, str(cluster_name), user, keyring) try: rbd_client = linuxrbd.RBDClient(user, pool, conffile=conf, rbd_cluster_name=str(cluster_name)) rbd_volume = linuxrbd.RBDVolume(rbd_client, volume) rbd_handle = linuxrbd.RBDVolumeIOWrapper( linuxrbd.RBDImageMetadata(rbd_volume, pool, user, conf)) except Exception: fileutils.delete_if_exists(conf) raise return rbd_handle @staticmethod def get_rbd_device_name(pool: str, volume: str) -> str: """Return device name which will be generated by RBD kernel module. :param pool: RBD pool name. :type pool: string :param volume: RBD image name. 
:type volume: string """ return '/dev/rbd/{pool}/{volume}'.format(pool=pool, volume=volume) @classmethod def create_non_openstack_config( cls, connection_properties: dict[str, Any]): """Get root owned Ceph's .conf file for non OpenStack usage.""" # If keyring info is missing then we are in OpenStack, nothing to do keyring = connection_properties.get('keyring') if not keyring: return None try: user = connection_properties['auth_username'] pool, volume = connection_properties['name'].split('/') cluster_name = connection_properties['cluster_name'] monitor_ips = connection_properties['hosts'] monitor_ports = connection_properties['ports'] keyring = connection_properties.get('keyring') except (KeyError, ValueError): msg = _("Connect volume failed, malformed connection properties.") raise exception.BrickException(msg=msg) conf = rbd_privsep.root_create_ceph_conf(monitor_ips, monitor_ports, str(cluster_name), user, keyring) return conf def _local_attach_volume( self, connection_properties: dict[str, Any]) -> \ dict[str, Union[str, linuxrbd.RBDVolumeIOWrapper]]: # NOTE(e0ne): sanity check if ceph-common is installed. try: self._execute('which', 'rbd') except putils.ProcessExecutionError: msg = _("ceph-common package is not installed.") LOG.error(msg) raise exception.BrickException(message=msg) # NOTE(e0ne): map volume to a block device # via the rbd kernel module. pool, volume = connection_properties['name'].split('/') rbd_dev_path = self.get_rbd_device_name(pool, volume) # If we are not running on OpenStack, create config file conf = self.create_non_openstack_config(connection_properties) try: if ( not os.path.islink(rbd_dev_path) or not os.path.exists(os.path.realpath(rbd_dev_path)) ): # TODO(stephenfin): Update to the unified 'rbd device map' # command introduced in ceph 13.0 (commit 6a57358add1157629a6d) # when we drop support earlier versions cmd = ['rbd', 'map', volume, '--pool', pool] cmd += self._get_rbd_args(connection_properties, conf) self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) else: LOG.debug( 'Volume %(vol)s is already mapped to local device %(dev)s', {'vol': volume, 'dev': os.path.realpath(rbd_dev_path)} ) if ( not os.path.islink(rbd_dev_path) or not os.path.exists(os.path.realpath(rbd_dev_path)) ): LOG.warning( 'Volume %(vol)s has not been mapped to local device ' '%(dev)s; is the udev daemon running and are the ' 'ceph-renamer udev rules configured? See bug #1884114 for ' 'more information.', {'vol': volume, 'dev': rbd_dev_path}, ) except Exception: # Cleanup conf file on failure with excutils.save_and_reraise_exception(): if conf: rbd_privsep.delete_if_exists(conf) res: dict[str, Union[str, linuxrbd.RBDVolumeIOWrapper]] res = {'path': rbd_dev_path, 'type': 'block'} if conf: res['conf'] = conf return res @utils.trace @utils.connect_volume_prepare_result def connect_volume(self, connection_properties: dict[str, Any]) -> \ dict[str, Union[linuxrbd.RBDVolumeIOWrapper, str]]: """Connect to a volume. :param connection_properties: The dictionary that describes all of the target volume attributes. :type connection_properties: dict :returns: dict """ do_local_attach = connection_properties.get('do_local_attach', self.do_local_attach) if do_local_attach: return self._local_attach_volume(connection_properties) rbd_handle = self._get_rbd_handle(connection_properties) return {'path': rbd_handle} def _find_root_device(self, connection_properties: dict[str, Any], conf) -> Optional[str]: """Find the underlying /dev/rbd* device for a mapping. 
Use the showmapped command to list all acive mappings and find the underlying /dev/rbd* device that corresponds to our pool and volume. :param connection_properties: The dictionary that describes all of the target volume attributes. :type connection_properties: dict :returns: '/dev/rbd*' or None if no active mapping is found. """ __, volume = connection_properties['name'].split('/') # TODO(stephenfin): Update to the unified 'rbd device list' # command introduced in ceph 13.0 (commit 6a57358add1157629a6d) # when we drop support earlier versions cmd = ['rbd', 'showmapped', '--format=json'] cmd += self._get_rbd_args(connection_properties, conf) (out, err) = self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) # ceph v13.2.0 (Mimic) changed the output format of 'rbd showmapped' # from a dict of mappings keyed by ID to a simple list of mappings # https://docs.ceph.com/docs/master/releases/mimic/ # # before: # # { # "0": { # "pool":"volumes", # "namespace":"", # "name":"volume-6d54cb90-a5d1-40d8-9cb2-c6adf43a02af", # "snap":"-", # "device":"/dev/rbd0" # } # } # # after: # # [ # { # "id":"0", # "pool":"volumes", # "namespace":"", # "name":"volume-6d54cb90-a5d1-40d8-9cb2-c6adf43a02af", # "snap":"-", # "device":"/dev/rbd0" # } # ] # # TODO(stephenfin): Drop when we drop support for ceph 13.2.0 mappings = jsonutils.loads(out) if isinstance(mappings, dict): # yes, we're losing the ID field but we don't need it here mappings = mappings.values() for mapping in mappings: if mapping['name'] == volume: return mapping['device'] return None @utils.trace @utils.connect_volume_undo_prepare_result(unlink_after=True) def disconnect_volume(self, connection_properties: dict[str, Any], device_info: dict, force: bool = False, ignore_errors: bool = False) -> None: """Disconnect a volume. :param connection_properties: The dictionary that describes all of the target volume attributes. 
:type connection_properties: dict :param device_info: historical difference, but same as connection_props :type device_info: dict """ do_local_attach = connection_properties.get('do_local_attach', self.do_local_attach) if do_local_attach: conf = device_info.get('conf') if device_info else None root_device = self._find_root_device(connection_properties, conf) if root_device: # TODO(stephenfin): Update to the unified 'rbd device unmap' # command introduced in ceph 13.0 (commit 6a57358add1157629a6d) # when we drop support earlier versions cmd = ['rbd', 'unmap', root_device] cmd += self._get_rbd_args(connection_properties, conf) self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) if conf: rbd_privsep.delete_if_exists(conf) else: if device_info: rbd_handle = device_info.get('path', None) if rbd_handle is not None: fileutils.delete_if_exists(rbd_handle.rbd_conf) rbd_handle.close() @staticmethod def _check_valid_device(rbd_handle: 'io.BufferedReader') -> bool: original_offset = rbd_handle.tell() try: rbd_handle.read(4096) except Exception as e: LOG.error("Failed to access RBD device handle: %(error)s", {"error": e}) return False finally: rbd_handle.seek(original_offset, 0) return True def check_valid_device(self, path: Optional[str], run_as_root: bool = True) -> bool: """Verify an existing RBD handle is connected and valid.""" if not path: return False # We can receive a file handle or a path to a device if isinstance(path, str): if run_as_root: return rbd_privsep.check_valid_path(path) else: with open(path, 'rb') as rbd_handle: return self._check_valid_device(rbd_handle) # For backward compatibility ignore run_as_root param with handles return self._check_valid_device(path) @utils.connect_volume_undo_prepare_result def extend_volume(self, connection_properties: dict[str, Any]) -> int: """Refresh local volume view and return current size in bytes.""" # Nothing to do, RBD attached volumes are automatically refreshed, but # we need to return the new size for compatibility do_local_attach = connection_properties.get('do_local_attach', self.do_local_attach) if not do_local_attach: handle = self._get_rbd_handle(connection_properties) try: # Handles should return absolute position on seek, but the RBD # wrapper doesn't, so we need to call tell afterwards handle.seek(0, 2) return handle.tell() finally: fileutils.delete_if_exists(handle.rbd_conf) handle.close() # Create config file when we do the attach on the host and not the VM conf = self.create_non_openstack_config(connection_properties) try: device_path = self._find_root_device(connection_properties, conf) finally: # If we have generated the config file we need to remove it if conf: try: rbd_privsep.delete_if_exists(conf) except Exception as exc: LOG.warning(_('Could not remove config file %(filename)s: ' '%(exc)s'), {'filename': conf, 'exc': exc}) if not device_path: msg = _('Cannot extend non mapped device.') raise exception.BrickException(msg=msg) device_name = os.path.basename(device_path) # ie: rbd0 device_number = device_name[3:] # ie: 0 # Get size from /sys/devices/rbd/0/size instead of # /sys/class/block/rbd0/size because the latter isn't updated with open('/sys/devices/rbd/' + device_number + '/size') as f: size_bytes = f.read().strip() return int(size_bytes) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/remotefs.py0000664000175000017500000001202400000000000023564 0ustar00zuulzuul00000000000000# All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import annotations from typing import Any, Callable from oslo_log import log as logging from os_brick import initiator from os_brick.initiator.connectors import base from os_brick.remotefs import remotefs from os_brick import utils LOG = logging.getLogger(__name__) class RemoteFsConnector(base.BaseLinuxConnector): """Connector class to attach/detach NFS and GlusterFS volumes.""" def __init__(self, mount_type, root_helper, driver=None, execute=None, device_scan_attempts=initiator.DEVICE_SCAN_ATTEMPTS_DEFAULT, *args, **kwargs): kwargs = kwargs or {} conn = kwargs.get('conn') mount_type_lower = mount_type.lower() if conn: mount_point_base = conn.get('mount_point_base') if mount_type_lower in ('nfs', 'glusterfs', 'scality', 'quobyte', 'vzstorage'): kwargs[mount_type_lower + '_mount_point_base'] = ( kwargs.get(mount_type_lower + '_mount_point_base') or mount_point_base) else: LOG.warning("Connection details not present." " RemoteFsClient may not initialize properly.") cls: Any if mount_type_lower == 'scality': cls = remotefs.ScalityRemoteFsClient elif mount_type_lower == 'vzstorage': cls = remotefs.VZStorageRemoteFSClient else: cls = remotefs.RemoteFsClient self._remotefsclient = cls(mount_type, root_helper, execute=execute, *args, **kwargs) super(RemoteFsConnector, self).__init__( root_helper, driver=driver, execute=execute, device_scan_attempts=device_scan_attempts, *args, **kwargs) # type: ignore @staticmethod def get_connector_properties(root_helper, *args, **kwargs): """The RemoteFS connector properties.""" return {} def set_execute(self, execute: Callable) -> None: super(RemoteFsConnector, self).set_execute(execute) self._remotefsclient.set_execute(execute) def get_search_path(self) -> str: return self._remotefsclient.get_mount_base() def _get_volume_path(self, connection_properties: dict[str, Any]) -> str: mnt_flags = [] if connection_properties.get('options'): mnt_flags = connection_properties['options'].split() nfs_share = connection_properties['export'] self._remotefsclient.mount(nfs_share, mnt_flags) mount_point = self._remotefsclient.get_mount_point(nfs_share) path = mount_point + '/' + connection_properties['name'] return path def get_volume_paths(self, connection_properties: dict[str, Any]) -> list[str]: path = self._get_volume_path(connection_properties) return [path] @utils.trace def connect_volume( self, connection_properties: dict[str, Any]) -> dict[str, Any]: """Ensure that the filesystem containing the volume is mounted. :param connection_properties: The dictionary that describes all of the target volume attributes. connection_properties must include: export - remote filesystem device (e.g. 
'172.18.194.100:/var/nfs') name - file name within the filesystem :type connection_properties: dict :returns: dict connection_properties may optionally include: options - options to pass to mount """ path = self._get_volume_path(connection_properties) return {'path': path} @utils.trace def disconnect_volume(self, connection_properties: dict[str, Any], device_info: dict, force: bool = False, ignore_errors: bool = False) -> None: """No need to do anything to disconnect a volume in a filesystem. :param connection_properties: The dictionary that describes all of the target volume attributes. :type connection_properties: dict :param device_info: historical difference, but same as connection_props :type device_info: dict """ def extend_volume(self, connection_properties: dict[str, Any]): # TODO(walter-boring): is this possible? raise NotImplementedError ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/scaleio.py0000664000175000017500000005233300000000000023366 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import os import urllib from oslo_log import log as logging import requests from os_brick import exception from os_brick.i18n import _ from os_brick import initiator from os_brick.initiator.connectors import base from os_brick.privileged import scaleio as priv_scaleio from os_brick import utils LOG = logging.getLogger(__name__) DEVICE_SCAN_ATTEMPTS_DEFAULT = 3 CONNECTOR_CONF_PATH = '/opt/emc/scaleio/openstack/connector.conf' def io(_type, nr): """Implementation of _IO macro from <sys/ioctl.h>.""" return ioc(0x0, _type, nr, 0) def ioc(direction, _type, nr, size): """Implementation of _IOC macro from <sys/ioctl.h>.""" return direction | (size & 0x1fff) << 16 | ord(_type) << 8 | nr class ScaleIOConnector(base.BaseLinuxConnector): """Class implements the connector driver for ScaleIO.""" OK_STATUS_CODE = 200 VOLUME_NOT_MAPPED_ERROR = 84 VOLUME_NOT_MAPPED_ERROR_v4 = 4039 VOLUME_ALREADY_MAPPED_ERROR = 81 VOLUME_ALREADY_MAPPED_ERROR_v4 = 4037 GET_GUID_OP_CODE = io('a', 14) RESCAN_VOLS_OP_CODE = io('a', 10) def __init__(self, root_helper, driver=None, device_scan_attempts=initiator.DEVICE_SCAN_ATTEMPTS_DEFAULT, *args, **kwargs): super(ScaleIOConnector, self).__init__( root_helper, driver=driver, device_scan_attempts=device_scan_attempts, *args, **kwargs ) self.local_sdc_ip = None self.server_ip = None self.server_port = None self.server_username = None self.server_password = None self.server_token = None self.volume_id = None self.volume_name = None self.volume_path = None self.iops_limit = None self.bandwidth_limit = None self.verify_certificate = None self.certificate_path = None def _get_guid(self): try: guid = priv_scaleio.get_guid(self.GET_GUID_OP_CODE) LOG.info("Current sdc guid: %s", guid) return guid except (IOError, OSError, ValueError) as e: msg = _("Error querying sdc guid: %s") % e LOG.error(msg) raise exception.BrickException(message=msg) @staticmethod def
_get_password_token(connection_properties): # In old connection format we had the password and token in properties if 'serverPassword' in connection_properties: return (connection_properties['serverPassword'], connection_properties['serverToken']) # The new format reads password from file and doesn't have the token LOG.info("Get ScaleIO connector password from configuration file") try: password = priv_scaleio.get_connector_password( CONNECTOR_CONF_PATH, connection_properties['config_group'], connection_properties.get('failed_over', False)) return password, None except Exception as e: msg = _("Error getting ScaleIO connector password from " "configuration file: %s") % e LOG.error(msg) raise exception.BrickException(message=msg) def _rescan_vols(self): LOG.info("ScaleIO rescan volumes") try: priv_scaleio.rescan_vols(self.RESCAN_VOLS_OP_CODE) except (IOError, OSError) as e: msg = _("Error querying volumes: %s") % e LOG.error(msg) raise exception.BrickException(message=msg) @staticmethod def get_connector_properties(root_helper, *args, **kwargs): """The ScaleIO connector properties.""" return {} def get_search_path(self): return "/dev/disk/by-id" def get_volume_paths(self, connection_properties): self.get_config(connection_properties) volume_paths = [] device_paths = [self._find_volume_path()] for path in device_paths: if os.path.exists(path): volume_paths.append(path) return volume_paths def _find_volume_path(self): LOG.info( "Looking for volume %(volume_id)s, maximum tries: %(tries)s", {'volume_id': self.volume_id, 'tries': self.device_scan_attempts} ) # look for the volume in /dev/disk/by-id directory by_id_path = self.get_search_path() disk_filename = self._wait_for_volume_path(by_id_path) full_disk_name = ("%(path)s/%(filename)s" % {'path': by_id_path, 'filename': disk_filename}) LOG.info("Full disk name is %(full_path)s", {'full_path': full_disk_name}) return full_disk_name # NOTE: Usually 3 retries is enough to find the volume. # If there are network issues, it could take much longer. Set # the max retries to 15 to make sure we can find the volume. 
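# Illustrative sketch, not os-brick code: the @utils.retry decorator below
# re-invokes the method while it raises BrickException, and backoff_rate=1
# keeps the wait between attempts constant instead of growing. Roughly:
#
#     for attempt in range(15):               # retries=15
#         try:
#             return self._wait_for_volume_path(path)
#         except exception.BrickException:
#             if attempt == 14:               # out of attempts, give up
#                 raise
#             time.sleep(interval)            # constant; backoff_rate=1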
@utils.retry(exception.BrickException, retries=15, backoff_rate=1) def _wait_for_volume_path(self, path): if not os.path.isdir(path): msg = ( _("ScaleIO volume %(volume_id)s not found at " "expected path.") % {'volume_id': self.volume_id} ) LOG.debug(msg) raise exception.BrickException(message=msg) disk_filename = None filenames = os.listdir(path) LOG.info( "Files found in %(path)s path: %(files)s ", {'path': path, 'files': filenames} ) for filename in filenames: if (filename.startswith("emc-vol") and filename.endswith(self.volume_id)): disk_filename = filename break if not disk_filename: msg = (_("ScaleIO volume %(volume_id)s not found.") % {'volume_id': self.volume_id}) LOG.debug(msg) raise exception.BrickException(message=msg) return disk_filename def _get_client_id(self): request = ( "https://%(server_ip)s:%(server_port)s/" "api/types/Client/instances/getByIp::%(sdc_ip)s/" % { 'server_ip': self.server_ip, 'server_port': self.server_port, 'sdc_ip': self.local_sdc_ip } ) LOG.info("ScaleIO get client id by ip request: %(request)s", {'request': request}) r = requests.get( request, auth=(self.server_username, self.server_token), verify=self._verify_cert() ) r = self._check_response(r, request) sdc_id = r.json() if not sdc_id: msg = (_("Client with ip %(sdc_ip)s was not found.") % {'sdc_ip': self.local_sdc_ip}) raise exception.BrickException(message=msg) if r.status_code != 200 and "errorCode" in sdc_id: msg = (_("Error getting sdc id from ip %(sdc_ip)s: %(err)s") % {'sdc_ip': self.local_sdc_ip, 'err': sdc_id['message']}) LOG.error(msg) raise exception.BrickException(message=msg) LOG.info("ScaleIO sdc id is %(sdc_id)s.", {'sdc_id': sdc_id}) return sdc_id def _get_volume_id(self): volname_encoded = urllib.parse.quote(self.volume_name, '') volname_double_encoded = urllib.parse.quote(volname_encoded, '') LOG.debug(_( "Volume name after double encoding is %(volume_name)s."), {'volume_name': volname_double_encoded} ) request = ( "https://%(server_ip)s:%(server_port)s/api/types/Volume/instances" "/getByName::%(encoded_volume_name)s" % { 'server_ip': self.server_ip, 'server_port': self.server_port, 'encoded_volume_name': volname_double_encoded } ) LOG.info( "ScaleIO get volume id by name request: %(request)s", {'request': request} ) r = requests.get(request, auth=(self.server_username, self.server_token), verify=self._verify_cert()) r = self._check_response(r, request) volume_id = r.json() if not volume_id: msg = (_("Volume with name %(volume_name)s wasn't found.") % {'volume_name': self.volume_name}) LOG.error(msg) raise exception.BrickException(message=msg) if r.status_code != self.OK_STATUS_CODE and "errorCode" in volume_id: msg = ( _("Error getting volume id from name %(volume_name)s: " "%(err)s") % {'volume_name': self.volume_name, 'err': volume_id['message']} ) LOG.error(msg) raise exception.BrickException(message=msg) LOG.info("ScaleIO volume id is %(volume_id)s.", {'volume_id': volume_id}) return volume_id def _check_response(self, response, request, is_get_request=True, params=None): if response.status_code == 401 or response.status_code == 403: LOG.info("Token is invalid, " "going to re-login to get a new one") login_request = ( "https://%(server_ip)s:%(server_port)s/api/login" % {'server_ip': self.server_ip, 'server_port': self.server_port} ) r = requests.get( login_request, auth=(self.server_username, self.server_password), verify=self._verify_cert() ) token = r.json() # repeat request with valid token LOG.debug(_("Going to perform request %(request)s again " "with valid token"), 
{'request': request}) if is_get_request: res = requests.get(request, auth=(self.server_username, token), verify=self._verify_cert()) else: headers = {'content-type': 'application/json'} res = requests.post( request, data=json.dumps(params), headers=headers, auth=(self.server_username, token), verify=self._verify_cert() ) self.server_token = token return res return response def _verify_cert(self): verify_cert = self.verify_certificate if self.verify_certificate and self.certificate_path: verify_cert = self.certificate_path return verify_cert def get_config(self, connection_properties): self.local_sdc_ip = connection_properties['hostIP'] self.volume_name = connection_properties['scaleIO_volname'] # instances which were created before Newton release don't have # 'scaleIO_volume_id' property, in such cases connector will resolve # volume_id from volname self.volume_id = connection_properties.get('scaleIO_volume_id') self.server_ip = connection_properties['serverIP'] self.server_port = connection_properties['serverPort'] self.server_username = connection_properties['serverUsername'] self.server_password, server_token = self._get_password_token( connection_properties) if server_token: self.server_token = server_token self.iops_limit = connection_properties['iopsLimit'] self.bandwidth_limit = connection_properties['bandwidthLimit'] self.verify_certificate = ( connection_properties.get('verify_certificate') ) self.certificate_path = connection_properties.get('certificate_path') device_info = {'type': 'block', 'path': self.volume_path} return device_info @utils.trace @utils.connect_volume_prepare_result @base.synchronized('scaleio', 'scaleio-', external=True) def connect_volume(self, connection_properties): """Connect the volume. :param connection_properties: The dictionary that describes all of the target volume attributes. :type connection_properties: dict :returns: dict """ device_info = self.get_config(connection_properties) LOG.debug( _( "scaleIO Volume name: %(volume_name)s, SDC IP: %(sdc_ip)s, " "REST Server IP: %(server_ip)s, " "REST Server username: %(username)s, " "iops limit: %(iops_limit)s, " "bandwidth limit: %(bandwidth_limit)s." 
), { 'volume_name': self.volume_name, 'volume_id': self.volume_id, 'sdc_ip': self.local_sdc_ip, 'server_ip': self.server_ip, 'username': self.server_username, 'iops_limit': self.iops_limit, 'bandwidth_limit': self.bandwidth_limit } ) guid = self._get_guid() params = {'guid': guid, 'allowMultipleMappings': 'TRUE'} self.volume_id = self.volume_id or self._get_volume_id() headers = {'content-type': 'application/json'} request = ( "https://%(server_ip)s:%(server_port)s/api/instances/" "Volume::%(volume_id)s/action/addMappedSdc" % {'server_ip': self.server_ip, 'server_port': self.server_port, 'volume_id': self.volume_id} ) LOG.info("map volume request: %(request)s", {'request': request}) r = requests.post( request, data=json.dumps(params), headers=headers, auth=(self.server_username, self.server_token), verify=self._verify_cert() ) r = self._check_response(r, request, False, params) if r.status_code != self.OK_STATUS_CODE: response = r.json() error_code = response['errorCode'] if error_code == self.VOLUME_ALREADY_MAPPED_ERROR or \ error_code == self.VOLUME_ALREADY_MAPPED_ERROR_v4: LOG.warning( "Ignoring error mapping volume %(volume_name)s: " "volume already mapped.", {'volume_name': self.volume_name} ) else: msg = ( _("Error mapping volume %(volume_name)s: %(err)s") % {'volume_name': self.volume_name, 'err': response['message']} ) LOG.error(msg) raise exception.BrickException(message=msg) self.volume_path = self._find_volume_path() device_info['path'] = self.volume_path # Set QoS settings after map was performed if self.iops_limit is not None or self.bandwidth_limit is not None: params = {'guid': guid} if self.bandwidth_limit is not None: params['bandwidthLimitInKbps'] = self.bandwidth_limit if self.iops_limit is not None: params['iopsLimit'] = self.iops_limit request = ( "https://%(server_ip)s:%(server_port)s/api/instances/" "Volume::%(volume_id)s/action/setMappedSdcLimits" % {'server_ip': self.server_ip, 'server_port': self.server_port, 'volume_id': self.volume_id} ) LOG.info("Set client limit request: %(request)s", {'request': request}) r = requests.post( request, data=json.dumps(params), headers=headers, auth=(self.server_username, self.server_token), verify=self._verify_cert() ) r = self._check_response(r, request, False, params) if r.status_code != self.OK_STATUS_CODE: response = r.json() LOG.info("Set client limit response: %(response)s", {'response': response}) msg = ( _("Error setting client limits for volume " "%(volume_name)s: %(err)s") % {'volume_name': self.volume_name, 'err': response['message']} ) LOG.error(msg) return device_info @utils.trace @base.synchronized('scaleio', 'scaleio-', external=True) @utils.connect_volume_undo_prepare_result(unlink_after=True) def disconnect_volume(self, connection_properties, device_info, force=False, ignore_errors=False): """Disconnect the ScaleIO volume. :param connection_properties: The dictionary that describes all of the target volume attributes. :type connection_properties: dict :param device_info: historical difference, but same as connection_props :type device_info: dict :type force: bool :param ignore_errors: When force is True, this will decide whether to ignore errors or raise an exception once finished the operation. Default is False. """ self.get_config(connection_properties) self.volume_id = self.volume_id or self._get_volume_id() LOG.info( "ScaleIO disconnect volume in ScaleIO brick volume driver." 
) LOG.debug( _("ScaleIO Volume name: %(volume_name)s, SDC IP: %(sdc_ip)s, " "REST Server IP: %(server_ip)s"), {'volume_name': self.volume_name, 'sdc_ip': self.local_sdc_ip, 'server_ip': self.server_ip} ) guid = self._get_guid() params = {'guid': guid} headers = {'content-type': 'application/json'} request = ( "https://%(server_ip)s:%(server_port)s/api/instances/" "Volume::%(volume_id)s/action/removeMappedSdc" % {'server_ip': self.server_ip, 'server_port': self.server_port, 'volume_id': self.volume_id} ) LOG.info("Unmap volume request: %(request)s", {'request': request}) r = requests.post( request, data=json.dumps(params), headers=headers, auth=(self.server_username, self.server_token), verify=self._verify_cert() ) r = self._check_response(r, request, False, params) if r.status_code != self.OK_STATUS_CODE: response = r.json() error_code = response['errorCode'] if error_code == self.VOLUME_NOT_MAPPED_ERROR or \ error_code == self.VOLUME_NOT_MAPPED_ERROR_v4: LOG.warning( "Ignoring error unmapping volume %(volume_id)s: " "volume not mapped.", {'volume_id': self.volume_name} ) else: msg = (_("Error unmapping volume %(volume_id)s: %(err)s") % {'volume_id': self.volume_name, 'err': response['message']}) LOG.error(msg) raise exception.BrickException(message=msg) else: if 'device_path' in connection_properties: path = connection_properties['device_path'] self._wait_for_remove_volume_path(path) @utils.connect_volume_undo_prepare_result def extend_volume(self, connection_properties): """Update the local kernel's size information. Try and update the local kernel's size information for a ScaleIO volume. """ self._rescan_vols() volume_paths = self.get_volume_paths(connection_properties) if volume_paths: return utils.get_device_size(self, volume_paths[0]) # if we got here, the volume is not mapped msg = (_("Error extending ScaleIO volume")) LOG.error(msg) raise exception.BrickException(message=msg) # NOTE: Usually 5 retries is enough to find the volume. # If there are network issues, it could take much longer. Set # the max retries to 15 to make sure we can find the volume. @utils.retry(exception.BrickException, retries=15, backoff_rate=1) def _wait_for_remove_volume_path(self, path): if os.path.exists(path): msg = (_("ScaleIO volume %(volume_id)s found " "after disconnect operation at path %(path)s") % {'volume_id': self.volume_id, 'path': path}) LOG.debug(msg) raise exception.BrickException(message=msg) else: LOG.info("ScaleIO disconnect volume %(volume_id)s " "removed at path %(path)s.", {'volume_id': self.volume_id, 'path': path}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/storpool.py0000664000175000017500000003223700000000000023631 0ustar00zuulzuul00000000000000# Copyright (c) 2015 - 2017 StorPool # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
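# Illustrative note, not part of the upstream module: the connector below
# reads the local StorPool configuration through storpool_utils.get_conf()
# and needs at least SP_API_HTTP_HOST, SP_API_HTTP_PORT, SP_AUTH_TOKEN and
# SP_OURID; SP_OPENSTACK_VOLUME_PREFIX is optional and defaults to "os".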
import os import pathlib import time from oslo_log import log as logging from os_brick import exception from os_brick.initiator.connectors import base from os_brick.initiator import storpool_utils from os_brick import utils LOG = logging.getLogger(__name__) DEV_STORPOOL = pathlib.Path('/dev/storpool') DEV_STORPOOL_BYID = pathlib.Path('/dev/storpool-byid') def path_to_volname(path: pathlib.Path) -> str: """Determine a volume name to pass to the StorPool API.""" if path.parent == DEV_STORPOOL_BYID: return f"~{path.name}" if path.parent == DEV_STORPOOL: return path.name raise exception.BrickException('Unexpected device path %(path)s' % {'path': path}) class StorPoolConnector(base.BaseLinuxConnector): """Connector class to attach/detach StorPool volumes.""" def __init__(self, root_helper, driver=None, *args, **kwargs): super(StorPoolConnector, self).__init__(root_helper, driver=driver, *args, **kwargs) try: self._config = storpool_utils.get_conf() self._sp_api = storpool_utils.StorPoolAPI( self._config["SP_API_HTTP_HOST"], self._config["SP_API_HTTP_PORT"], self._config["SP_AUTH_TOKEN"]) self._volume_prefix = self._config.get( "SP_OPENSTACK_VOLUME_PREFIX", "os") except Exception as e: raise exception.BrickException( 'Could not initialize the StorPool API: %s' % (e)) if "SP_OURID" not in self._config: raise exception.BrickException( 'Could not read "SP_OURID" from the StorPool configuration') def _detach_retry(self, sp_ourid, volume): """Retry detaching. Retries attempt to handle LUKS test-related failures: busy: volume ... open at ... """ count = 10 while True: try: force = count == 0 self._sp_api.volumes_reassign_wait( { "reassign": [{ "volume": volume, "detach": [sp_ourid], "force": force, }] } ) break except storpool_utils.StorPoolAPIError as exc: if ( exc.name in ("busy", "invalidParam") and "is open at" in exc.desc ): assert count > 0 time.sleep(0.2) count -= 1 else: raise @staticmethod def get_connector_properties(root_helper, *args, **kwargs): """The StorPool connector properties.""" return {} @utils.connect_volume_prepare_result def connect_volume(self, connection_properties): """Connect to a volume. :param connection_properties: The dictionary that describes all of the target volume attributes; it needs to contain the StorPool 'client_id' and the common 'volume' and 'access_mode' values.
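Example (illustrative values only)::

    {'client_id': 1,
     'volume': '6d54cb90-a5d1-40d8-9cb2-c6adf43a02af',
     'access_mode': 'rw'}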
:type connection_properties: dict :returns: dict """ client_id = connection_properties.get('client_id', None) if client_id is None: raise exception.BrickException( 'Invalid StorPool connection data, no client ID specified.') volume_id = connection_properties.get('volume', None) if volume_id is None: raise exception.BrickException( 'Invalid StorPool connection data, no volume ID specified.') volume = storpool_utils.os_to_sp_volume_name( self._volume_prefix, volume_id) mode = connection_properties.get('access_mode', None) if mode is None or mode not in ('rw', 'ro'): raise exception.BrickException( 'Invalid access_mode specified in the connection data.') try: sp_ourid = self._config["SP_OURID"] except KeyError: raise exception.BrickException( 'SP_OURID missing, cannot connect volume %s' % volume_id) try: self._sp_api.volumes_reassign_wait( {"reassign": [{"volume": volume, mode: [sp_ourid]}]}) except Exception as exc: raise exception.BrickException( 'Communication with the StorPool API ' 'failed: %s' % (exc)) from exc try: volume_info = self._sp_api.volume_get_info(volume) sp_global_id = volume_info['globalId'] except Exception as exc: raise exception.BrickException( 'Communication with the StorPool API ' 'failed: %s' % (exc)) from exc return {'type': 'block', 'path': str(DEV_STORPOOL_BYID) + '/' + sp_global_id} @utils.connect_volume_undo_prepare_result(unlink_after=True) def disconnect_volume(self, connection_properties, device_info, force=False, ignore_errors=False): """Disconnect a volume from the local host. The connection_properties are the same as from connect_volume. The device_info is returned from connect_volume. :param connection_properties: The dictionary that describes all of the target volume attributes; it needs to contain the StorPool 'client_id' and the common 'volume' values. :type connection_properties: dict :param device_info: historical difference, but same as connection_props :type device_info: dict :param force: Whether to forcefully disconnect even if flush fails. For StorPool, this parameter is ignored, the volume is always detached. :type force: bool :param ignore_errors: When force is True, this will decide whether to ignore errors or raise an exception once finished the operation. Default is False. For StorPool, this parameter is ignored, no exception is raised except on unexpected errors. :type ignore_errors: bool """ client_id = connection_properties.get('client_id', None) if client_id is None: raise exception.BrickException( 'Invalid StorPool connection data, no client ID specified.') device_path = connection_properties.get('device_path', None) if device_path is None: LOG.debug('connection_properties is missing "device_path",' ' looking for "path" inside device_info') if device_info: device_path = device_info.get('path', None) if device_path is None: raise exception.BrickException( 'Invalid StorPool connection data, no device_path specified.') volume_name = path_to_volname(pathlib.Path(device_path)) try: sp_ourid = self._config["SP_OURID"] except KeyError: raise exception.BrickException( 'SP_OURID missing, cannot disconnect volume %s' % volume_name) try: self._detach_retry(sp_ourid, volume_name) except Exception as exc: raise exception.BrickException( 'Communication with the StorPool API ' 'failed: %s' % (exc)) from exc def get_search_path(self): return '/dev/storpool' def get_volume_paths(self, connection_properties): """Return the list of existing paths for a volume. 
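(For StorPool this is always a single path under ``/dev/storpool``, built from the configured volume prefix and the volume ID.)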
The job of this method is to find out what paths in the system are associated with a volume as described by the connection_properties. :param connection_properties: The dictionary that describes all of the target volume attributes; it needs to contain 'volume' and 'device_path' values. :type connection_properties: dict """ volume_id = connection_properties.get('volume', None) if volume_id is None: raise exception.BrickException( 'Invalid StorPool connection data, no volume ID specified.') volume = storpool_utils.os_to_sp_volume_name( self._volume_prefix, volume_id) path = '/dev/storpool/' + volume dpath = connection_properties.get('device_path', None) if dpath is not None and dpath != path: raise exception.BrickException( 'Internal error: StorPool volume path %(path)s does not ' 'match device path %(dpath)s' % {'path': path, 'dpath': dpath}) return [path] def get_all_available_volumes(self, connection_properties=None): """Return all volumes that exist in the search directory. At connect_volume time, a Connector looks in a specific directory to discover a volume's paths showing up. This method's job is to return all paths in the directory that connect_volume uses to find a volume. This method is used in coordination with get_volume_paths() to verify that volumes have gone away after disconnect_volume has been called. :param connection_properties: The dictionary that describes all of the target volume attributes. Unused for the StorPool connector. :type connection_properties: dict """ names = [] prefix = storpool_utils.os_to_sp_volume_name(self._volume_prefix, '') prefixlen = len(prefix) if os.path.isdir('/dev/storpool'): files = os.listdir('/dev/storpool') for entry in files: full = '/dev/storpool/' + entry if entry.startswith(prefix) and os.path.islink(full) and \ not os.path.isdir(full): names.append(entry[prefixlen:]) return names def _get_device_size(self, device): """Get the size in bytes of a volume.""" (out, _err) = self._execute('blockdev', '--getsize64', device, run_as_root=True, root_helper=self._root_helper) var = str(out).strip() if var.isnumeric(): return int(var) else: return None @utils.connect_volume_undo_prepare_result def extend_volume(self, connection_properties): """Update the attached volume's size. This method will attempt to update the local hosts's volume after the volume has been extended on the remote system. The new volume size in bytes will be returned. If there is a failure to update, then None will be returned. :param connection_properties: The volume connection properties. :returns: new size of the volume. """ # The StorPool client (storpool_block service) running on this host # should have picked up the change already, so it is enough to query # the actual disk device to see if its size is correct. 
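# (Illustrative note, not upstream code: the polling below compares the
# kernel-reported block device size, i.e. what "blockdev --getsize64"
# returns, as in _get_device_size() above, against the size the StorPool
# API reports for the volume.)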
# volume_id = connection_properties.get('volume', None) if volume_id is None: raise exception.BrickException( 'Invalid StorPool connection data, no volume ID specified.') # Get the expected (new) size from the StorPool API volume = storpool_utils.os_to_sp_volume_name( self._volume_prefix, volume_id) LOG.debug('Querying the StorPool API for the size of %(vol)s', {'vol': volume}) vdata = self._sp_api.volume(volume)[0] LOG.debug('Got size %(size)d', {'size': vdata['size']}) # Wait for the StorPool client to update the size of the local device path = '/dev/storpool/' + volume for _num in range(10): size = utils.get_device_size(self, path) LOG.debug('Got local size %(size)d', {'size': size}) if size == vdata['size']: return size time.sleep(0.1) else: size = utils.get_device_size(self, path) LOG.debug('Last attempt: local size %(size)d', {'size': size}) return size ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/connectors/vmware.py0000664000175000017500000003422500000000000023250 0ustar00zuulzuul00000000000000# Copyright (c) 2016 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import tempfile from oslo_log import log as logging from oslo_utils import fileutils try: from oslo_vmware import api from oslo_vmware import exceptions as oslo_vmw_exceptions from oslo_vmware import image_transfer from oslo_vmware.objects import datastore from oslo_vmware import rw_handles from oslo_vmware import vim_util except ImportError: vim_util = None from os_brick import exception from os_brick.i18n import _ from os_brick.initiator import initiator_connector LOG = logging.getLogger(__name__) class VmdkConnector(initiator_connector.InitiatorConnector): """Connector for volumes created by the VMDK driver. This connector is only used for backup and restore of Cinder volumes. """ TMP_IMAGES_DATASTORE_FOLDER_PATH = "cinder_temp" def __init__(self, *args, **kwargs): # Check if oslo.vmware library is available. 
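# (Illustrative note: the guarded "try: ... except ImportError" at the top
# of this module leaves vim_util as None when oslo.vmware is missing, so
# the check below raises a descriptive BrickException instead of an
# obscure NameError at call time.)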
if vim_util is None: message = _("Missing oslo_vmware python module, ensure oslo.vmware" " library is installed and available.") raise exception.BrickException(message=message) super(VmdkConnector, self).__init__(*args, **kwargs) self._ip = None self._port = None self._username = None self._password = None self._api_retry_count = None self._task_poll_interval = None self._ca_file = None self._insecure = None self._tmp_dir = None self._timeout = None @staticmethod def get_connector_properties(root_helper, *args, **kwargs): return {} def check_valid_device(self, path, *args, **kwargs): try: with open(path, 'r') as dev: dev.read(1) except IOError: LOG.exception( "Failed to access the device on the path " "%(path)s", {"path": path}) return False return True def get_volume_paths(self, connection_properties): return [] def get_search_path(self): return None def get_all_available_volumes(self, connection_properties=None): pass def _load_config(self, connection_properties): config = connection_properties['config'] self._ip = config['vmware_host_ip'] self._port = config['vmware_host_port'] self._username = config['vmware_host_username'] self._password = config['vmware_host_password'] self._api_retry_count = config['vmware_api_retry_count'] self._task_poll_interval = config['vmware_task_poll_interval'] self._ca_file = config['vmware_ca_file'] self._insecure = config['vmware_insecure'] self._tmp_dir = config['vmware_tmp_dir'] self._timeout = config['vmware_image_transfer_timeout_secs'] def _create_session(self): return api.VMwareAPISession(self._ip, self._username, self._password, self._api_retry_count, self._task_poll_interval, port=self._port, cacert=self._ca_file, insecure=self._insecure) def _create_temp_file(self, *args, **kwargs): fileutils.ensure_tree(self._tmp_dir) fd, tmp = tempfile.mkstemp(dir=self._tmp_dir, *args, **kwargs) os.close(fd) return tmp def _download_vmdk( self, tmp_file_path, session, backing, vmdk_path, vmdk_size): with open(tmp_file_path, "wb") as tmp_file: image_transfer.copy_stream_optimized_disk( None, self._timeout, tmp_file, session=session, host=self._ip, port=self._port, vm=backing, vmdk_file_path=vmdk_path, vmdk_size=vmdk_size) def connect_volume(self, connection_properties): # Download the volume vmdk from vCenter server to a temporary file # and return its path. self._load_config(connection_properties) session = self._create_session() tmp_file_path = self._create_temp_file( suffix=".vmdk", prefix=connection_properties['volume_id']) backing = vim_util.get_moref(connection_properties['volume'], "VirtualMachine") vmdk_path = connection_properties['vmdk_path'] vmdk_size = connection_properties['vmdk_size'] try: self._download_vmdk( tmp_file_path, session, backing, vmdk_path, vmdk_size) finally: session.logout() # Save the last modified time of the temporary so that we can decide # whether to upload the file back to vCenter server during disconnect. 
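# Illustrative usage sketch with a hypothetical caller, not os-brick code:
#
#     info = connector.connect_volume(props)    # downloads vmdk to tmp file
#     restore_backup_into(info['path'])         # hypothetical backup step
#     connector.disconnect_volume(props, info)  # re-uploads only if the
#                                               # file's mtime changed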
last_modified = os.path.getmtime(tmp_file_path) return {'path': tmp_file_path, 'last_modified': last_modified} def _snapshot_exists(self, session, backing): snapshot = session.invoke_api(vim_util, 'get_object_property', session.vim, backing, 'snapshot') if snapshot is None or snapshot.rootSnapshotList is None: return False return len(snapshot.rootSnapshotList) != 0 def _create_temp_ds_folder(self, session, ds_folder_path, dc_ref): fileManager = session.vim.service_content.fileManager try: session.invoke_api(session.vim, 'MakeDirectory', fileManager, name=ds_folder_path, datacenter=dc_ref) except oslo_vmw_exceptions.FileAlreadyExistsException: pass # Note(vbala) remove this method when we implement it in oslo.vmware def _upload_vmdk( self, read_handle, host, port, dc_name, ds_name, cookies, upload_file_path, file_size, cacerts, timeout_secs): write_handle = rw_handles.FileWriteHandle(host, port, dc_name, ds_name, cookies, upload_file_path, file_size, cacerts=cacerts) image_transfer._start_transfer(read_handle, write_handle, timeout_secs) def _get_disk_device(self, session, backing): hardware_devices = session.invoke_api(vim_util, 'get_object_property', session.vim, backing, 'config.hardware.device') if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": hardware_devices = hardware_devices.VirtualDevice for device in hardware_devices: if device.__class__.__name__ == "VirtualDisk": return device def _create_spec_for_disk_remove(self, session, disk_device): cf = session.vim.client.factory disk_spec = cf.create('ns0:VirtualDeviceConfigSpec') disk_spec.operation = 'remove' disk_spec.fileOperation = 'destroy' disk_spec.device = disk_device return disk_spec def _reconfigure_backing(self, session, backing, reconfig_spec): LOG.debug("Reconfiguring backing VM: %(backing)s with spec: %(spec)s.", {'backing': backing, 'spec': reconfig_spec}) reconfig_task = session.invoke_api(session.vim, "ReconfigVM_Task", backing, spec=reconfig_spec) LOG.debug("Task: %s created for reconfiguring backing VM.", reconfig_task) session.wait_for_task(reconfig_task) def _detach_disk_from_backing(self, session, backing, disk_device): LOG.debug("Reconfiguring backing VM: %(backing)s to remove disk: " "%(disk_device)s.", {'backing': backing, 'disk_device': disk_device}) cf = session.vim.client.factory reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') spec = self._create_spec_for_disk_remove(session, disk_device) reconfig_spec.deviceChange = [spec] self._reconfigure_backing(session, backing, reconfig_spec) def _attach_disk_to_backing(self, session, backing, disk_device): LOG.debug("Reconfiguring backing VM: %(backing)s to add disk: " "%(disk_device)s.", {'backing': backing, 'disk_device': disk_device}) cf = session.vim.client.factory reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') disk_spec = cf.create('ns0:VirtualDeviceConfigSpec') disk_spec.operation = 'add' disk_spec.device = disk_device reconfig_spec.deviceChange = [disk_spec] self._reconfigure_backing(session, backing, reconfig_spec) def _disconnect( self, backing, tmp_file_path, session, ds_ref, dc_ref, vmdk_path): # The restored volume is in compressed (streamOptimized) format. # So we upload it to a temporary location in vCenter datastore and copy # the compressed vmdk to the volume vmdk. The copy operation # decompresses the disk to a format suitable for attaching to Nova # instances in vCenter. 
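# The steps below, in order: (1) upload the streamOptimized file to a
# temporary datastore folder, (2) detach the disk from the backing VM,
# (3) run CopyVirtualDisk_Task from the temporary vmdk over the volume
# vmdk, which decompresses it, (4) re-attach the disk, and (5) delete the
# temporary datastore file.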
dstore = datastore.get_datastore_by_ref(session, ds_ref) ds_path = dstore.build_path( VmdkConnector.TMP_IMAGES_DATASTORE_FOLDER_PATH, os.path.basename(tmp_file_path)) self._create_temp_ds_folder( session, str(ds_path.parent), dc_ref) with open(tmp_file_path, "rb") as tmp_file: dc_name = session.invoke_api( vim_util, 'get_object_property', session.vim, dc_ref, 'name') cookies = session.vim.client.cookiejar cacerts = self._ca_file if self._ca_file else not self._insecure self._upload_vmdk( tmp_file, self._ip, self._port, dc_name, dstore.name, cookies, ds_path.rel_path, os.path.getsize(tmp_file_path), cacerts, self._timeout) disk_device = self._get_disk_device(session, backing) self._detach_disk_from_backing(session, backing, disk_device) src = str(ds_path) LOG.debug("Copying %(src)s to %(dest)s", {'src': src, 'dest': vmdk_path}) disk_mgr = session.vim.service_content.virtualDiskManager task = session.invoke_api(session.vim, 'CopyVirtualDisk_Task', disk_mgr, sourceName=src, sourceDatacenter=dc_ref, destName=vmdk_path, destDatacenter=dc_ref) session.wait_for_task(task) self._attach_disk_to_backing(session, backing, disk_device) # Delete the compressed vmdk at the temporary location. LOG.debug("Deleting %s", src) file_mgr = session.vim.service_content.fileManager task = session.invoke_api(session.vim, 'DeleteDatastoreFile_Task', file_mgr, name=src, datacenter=dc_ref) session.wait_for_task(task) def disconnect_volume(self, connection_properties, device_info, force=False, ignore_errors=False): tmp_file_path = device_info['path'] if not os.path.exists(tmp_file_path): msg = _("Vmdk: %s not found.") % tmp_file_path raise exception.NotFound(message=msg) session = None try: # We upload the temporary file to vCenter server only if it is # modified after connect_volume. if os.path.getmtime(tmp_file_path) > device_info['last_modified']: self._load_config(connection_properties) session = self._create_session() backing = vim_util.get_moref(connection_properties['volume'], "VirtualMachine") # Currently there is no way we can restore the volume if it # contains redo-log based snapshots (bug 1599026). if self._snapshot_exists(session, backing): msg = (_("Backing of volume: %s contains one or more " "snapshots; cannot disconnect.") % connection_properties['volume_id']) raise exception.BrickException(message=msg) ds_ref = vim_util.get_moref( connection_properties['datastore'], "Datastore") dc_ref = vim_util.get_moref( connection_properties['datacenter'], "Datacenter") vmdk_path = connection_properties['vmdk_path'] self._disconnect( backing, tmp_file_path, session, ds_ref, dc_ref, vmdk_path) finally: os.remove(tmp_file_path) if session: session.logout() def extend_volume(self, connection_properties): raise NotImplementedError ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/host_driver.py0000664000175000017500000000223600000000000022117 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from __future__ import annotations import errno import os class HostDriver(object): def get_all_block_devices(self) -> list[str]: """Get the list of all block devices seen in /dev/disk/by-path/.""" dir = "/dev/disk/by-path/" try: files = os.listdir(dir) except OSError as e: if e.errno == errno.ENOENT: files = [] else: raise devices = [] for file in files: devices.append(dir + file) return devices ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/initiator_connector.py0000664000175000017500000002260700000000000023647 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from os_brick import exception from os_brick import executor from os_brick import initiator class InitiatorConnector(executor.Executor, metaclass=abc.ABCMeta): # This object can be used on any platform (x86, S390) platform = initiator.PLATFORM_ALL # This object can be used on any os type (linux, windows) os_type = initiator.OS_TYPE_ALL def __init__(self, root_helper, driver=None, execute=None, device_scan_attempts=initiator.DEVICE_SCAN_ATTEMPTS_DEFAULT, *args, **kwargs): super(InitiatorConnector, self).__init__(root_helper, execute=execute, *args, **kwargs) self.device_scan_attempts = device_scan_attempts def set_driver(self, driver): """The driver is used to find used LUNs.""" self.driver = driver @staticmethod @abc.abstractmethod def get_connector_properties(root_helper, *args, **kwargs): """The generic connector properties.""" pass @abc.abstractmethod def check_valid_device(self, path, run_as_root=True): """Test to see if the device path is a real device. :param path: The file system path for the device. :type path: str :param run_as_root: run the tests as root user? :type run_as_root: bool :returns: bool """ pass @abc.abstractmethod def connect_volume(self, connection_properties): """Connect to a volume. The connection_properties describes the information needed by the specific protocol to use to make the connection. The connection_properties is a dictionary that describes the target volume. It varies slightly by protocol type (iscsi, fibre_channel), but the structure is usually the same. 
An example for iSCSI: {'driver_volume_type': 'iscsi', 'data': { 'target_luns': [0, 2], 'target_iqns': ['iqn.2000-05.com.3pardata:20810002ac00383d', 'iqn.2000-05.com.3pardata:21810002ac00383d'], 'target_discovered': True, 'encrypted': False, 'qos_specs': None, 'target_portals': ['10.52.1.11:3260', '10.52.2.11:3260'], 'access_mode': 'rw', }} An example for fibre_channel with single lun: {'driver_volume_type': 'fibre_channel', 'data': { 'initiator_target_map': {'100010604b010459': ['20210002AC00383D'], '100010604b01045d': ['20220002AC00383D']}, 'target_discovered': True, 'encrypted': False, 'qos_specs': None, 'target_lun': 1, 'access_mode': 'rw', 'target_wwn': [ '20210002AC00383D', '20220002AC00383D', ], }} An example for fibre_channel target_wwns and with different LUNs and all host ports mapped to target ports: {'driver_volume_type': 'fibre_channel', 'data': { 'initiator_target_map': { '100010604b010459': ['20210002AC00383D', '20220002AC00383D'], '100010604b01045d': ['20210002AC00383D', '20220002AC00383D'] }, 'target_discovered': True, 'encrypted': False, 'qos_specs': None, 'target_luns': [1, 2], 'access_mode': 'rw', 'target_wwns': ['20210002AC00383D', '20220002AC00383D'], }} For FC the dictionary could also present the enable_wildcard_scan key with a boolean value (defaults to True) in case a driver doesn't want OS-Brick to use a SCSI scan with wildcards when the FC initiator on the host doesn't find any target port. This is useful for drivers that know that sysfs gets populated whenever there's a connection between the host's HBA and the storage array's target ports. Encrypted volumes have some peculiar requirements on the path that must be returned, so it is recommended to decorate the method with the os_brick.utils.connect_volume_prepare_result to ensure that the right device path is returned to the caller. :param connection_properties: The dictionary that describes all of the target volume attributes. :type connection_properties: dict :returns: dict """ pass @abc.abstractmethod def disconnect_volume(self, connection_properties, device_info, force=False, ignore_errors=False): """Disconnect a volume from the local host. The connection_properties are the same as from connect_volume. The device_info is returned from connect_volume. If the connector's connect_volume is decorated with os_brick.utils.connect_volume_prepare_result then the path will have been changed by the decorator if the volume was encrypted, so if we need to have the original path that the connector returned instead of the modified one (for example to identify the WWN from the symlink) then we should use the os_brick.utils.connect_volume_undo_prepare_result decorator with the unlink_after=True parameter. :param connection_properties: The dictionary that describes all of the target volume attributes. :type connection_properties: dict :param device_info: historical difference, but same as connection_props :type device_info: dict :param force: Whether to forcefully disconnect even if flush fails. :type force: bool :param ignore_errors: When force is True, this will decide whether to ignore errors or raise an exception once finished the operation. Default is False. :type ignore_errors: bool """ pass @abc.abstractmethod def get_volume_paths(self, connection_properties): """Return the list of existing paths for a volume. The job of this method is to find out what paths in the system are associated with a volume as described by the connection_properties. 
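For example, the ScaleIO connector in this package reports the matching 'emc-vol*' links it finds under /dev/disk/by-id.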
:param connection_properties: The dictionary that describes all of the target volume attributes. :type connection_properties: dict """ pass @abc.abstractmethod def get_search_path(self): """Return the directory where a Connector looks for volumes. Some Connectors need the information in the connection_properties to determine the search path. """ pass @abc.abstractmethod def extend_volume(self, connection_properties): """Update the attached volume's size. This method will attempt to update the local hosts's volume after the volume has been extended on the remote system. The new volume size in bytes will be returned. If there is a failure to update, then None will be returned. If the connector's connect_volume is decorated with os_brick.utils.connect_volume_prepare_result then the path will have been changed by the decorator if the volume was encrypted, so if this method uses the original path as a shortcut to know which device to extend (instead of using the other connection information) then it should use the os_brick.utils.connect_volume_undo_prepare_result decorator on this method so that it gets the original path instead of the modified symlink one. :param connection_properties: The volume connection properties. :returns: new size of the volume. """ pass @abc.abstractmethod def get_all_available_volumes(self, connection_properties=None): """Return all volumes that exist in the search directory. At connect_volume time, a Connector looks in a specific directory to discover a volume's paths showing up. This method's job is to return all paths in the directory that connect_volume uses to find a volume. This method is used in coordination with get_volume_paths() to verify that volumes have gone away after disconnect_volume has been called. :param connection_properties: The dictionary that describes all of the target volume attributes. :type connection_properties: dict """ pass def check_IO_handle_valid(self, handle, data_type, protocol): """Check IO handle has correct data type.""" if (handle and not isinstance(handle, data_type)): raise exception.InvalidIOHandleObject( protocol=protocol, actual_type=type(handle)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/linuxfc.py0000664000175000017500000004071300000000000021241 0ustar00zuulzuul00000000000000# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Generic linux Fibre Channel utilities.""" from __future__ import annotations import glob import os from typing import Iterable from oslo_concurrency import processutils as putils from oslo_log import log as logging from os_brick.initiator import linuxscsi LOG = logging.getLogger(__name__) class LinuxFibreChannel(linuxscsi.LinuxSCSI): FC_HOST_SYSFS_PATH = '/sys/class/fc_host' # Only load the sysfs attributes we care about HBA_ATTRIBUTES = ('port_name', 'node_name', 'port_state') def _get_target_fc_transport_path(self, path, wwpn, lun): """Scan target in the fc_transport path Scan for target in the following path: * /sys/class/fc_transport/target* :returns: List with [c, t, l] if the target path exists else empty list """ try: cmd = 'grep -Gil "%(wwpns)s" %(path)s*/port_name' % {'wwpns': wwpn, 'path': path} # We need to run command in shell to expand the * glob out, _err = self._execute(cmd, shell=True) # nosec: B604 # The grep command will only return 1 path (if found) # associated with the target wwpn used for the search # in the current HBA host out_path = out.split('\n')[0] if out_path.startswith(path): return out_path.split('/')[4].split(':')[1:] + [lun] except Exception as exc: LOG.debug('Could not get HBA channel and SCSI target ID, path:' ' %(path)s*, reason: %(reason)s', {'path': path, 'reason': exc}) return [] def _get_target_fc_remote_ports_path(self, path, wwpn, lun): """Scan target in the fc_remote_ports path Scan for target in the following path: * /sys/class/fc_remote_ports/rport-* If the path exist, we fetch the target value from the scsi_target_id file. Example: /sys/class/fc_remote_ports/rport-6:0-1/scsi_target_id :returns: List with [c, t, l] if the target path exists else empty list """ try: cmd = 'grep -Gil "%(wwpns)s" %(path)s*/port_name' % {'wwpns': wwpn, 'path': path} # We need to run command in shell to expand the * glob out, _err = self._execute(cmd, shell=True) # nosec: B604 # The scsi_target_id file contains the target ID. # Example path: # /sys/class/fc_remote_ports/rport-2:0-0/scsi_target_id target_path = os.path.dirname(out) + '/scsi_target_id' # There could be a case where the out variable has empty string # and we end up with a path '/scsi_target_id' so check if it # starts with the correct path if target_path.startswith(path): try: scsi_target = '-1' with open(target_path) as scsi_target_file: lines = scsi_target_file.read() scsi_target = lines.split('\n')[0] except OSError: # We were not able to read from the scsi_target_id # file but we can still discover other targets so # continue pass # If the target value is -1, it is not a real target so # skip it if scsi_target != '-1': channel = target_path.split(':')[1].split('-')[0] return [channel, scsi_target, lun] except Exception as exc: LOG.debug('Could not get HBA channel and SCSI target ID, path:' ' %(path)s*, reason: %(reason)s', {'path': path, 'reason': exc}) return [] def _get_hba_channel_scsi_target_lun(self, hba, conn_props): """Get HBA channels, SCSI targets, LUNs to FC targets for given HBA. Given an HBA and the connection properties we look for the HBA channels and SCSI targets for each of the FC targets that this HBA has been granted permission to connect. For drivers that don't return an initiator to target map we try to find the info for all the target ports. For drivers that return an initiator_target_map we use the initiator_target_lun_map entry that was generated by the FC connector based on the contents of the connection information data to know which target ports to look for. 
We scan for targets in the following two paths: * /sys/class/fc_transport/target* * /sys/class/fc_remote_ports/rport-* We search for targets in the fc_transport path first and, if not found, we search in the fc_remote_ports path. :returns: 2-Tuple with the first entry being a list of [c, t, l] entries where the target port was found, and the second entry of the tuple being a set of luns for ports that were not found. """ # We want the targets' WWPNs, so we use the initiator_target_map if # present for this hba or default to targets if not present. targets = conn_props['targets'] if conn_props.get('initiator_target_map') is not None: # This map we try to use was generated by the FC connector targets = conn_props['initiator_target_lun_map'].get( hba['port_name'], targets) # Leave only the number from the host_device field (ie: host6) host_device = hba['host_device'] if host_device and len(host_device) > 4: host_device = host_device[4:] path = '/sys/class/fc_transport/target%s:' % host_device rpath = '/sys/class/fc_remote_ports/rport-%s:' % host_device ctls = [] luns_not_found = set() for wwpn, lun in targets: # Search for target in the fc_transport path first and if we # don't find ctl, search for target in the fc_remote_ports path ctl = (self._get_target_fc_transport_path(path, wwpn, lun) or self._get_target_fc_remote_ports_path(rpath, wwpn, lun)) if ctl: ctls.append(ctl) else: # If we didn't find any paths add it to the not found list luns_not_found.add(lun) return ctls, luns_not_found def rescan_hosts(self, hbas: Iterable, connection_properties: dict) -> None: LOG.debug('Rescanning HBAs %(hbas)s with connection properties ' '%(conn_props)s', {'hbas': hbas, 'conn_props': connection_properties}) # Use initiator_target_lun_map (generated from initiator_target_map by # the FC connector) as HBA exclusion map ports = connection_properties.get('initiator_target_lun_map') if ports: hbas = [hba for hba in hbas if hba['port_name'] in ports] LOG.debug('Using initiator target map to exclude HBAs: %s', hbas) # Most storage arrays get their target ports automatically detected # by the Linux FC initiator and sysfs gets populated with that # information, but there are some that don't. We'll do a narrow scan # using the channel, target, and LUN for the former and a wider scan # for the latter. If all paths to a former type of array were down at # system boot the array could look like it's of the latter type # and make us bring unwanted volumes into the system by doing a # broad scan. To prevent this from happening Cinder drivers can use # the "enable_wildcard_scan" key in the connection_info to let us know # they don't want us to do broad scans even in those cases.
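# Illustrative example (values assumed): a narrow scan of channel 0,
# target 3, LUN 1 on host6 echoes "0 3 1" into
# /sys/class/scsi_host/host6/scan, while the wildcard fallback built
# below for the same LUN echoes "- - 1", asking the kernel to probe
# every channel and target on that host.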
broad_scan = connection_properties.get('enable_wildcard_scan', True) if not broad_scan: LOG.debug('Connection info disallows broad SCSI scanning') process = [] skipped = [] get_ctls = self._get_hba_channel_scsi_target_lun for hba in hbas: ctls, luns_wildcards = get_ctls(hba, connection_properties) # If we found the target ports, ignore HBAs that didn't find them if ctls: process.append((hba, ctls)) # If the target ports were not found but should have been, the # HBA is not connected to our storage elif not broad_scan: LOG.debug('Skipping HBA %s, nothing to scan, target port ' 'not connected to initiator', hba['node_name']) # If we haven't found any target ports we may need to do broad # SCSI scans elif not process: skipped.append((hba, [('-', '-', lun) for lun in luns_wildcards])) # If we didn't find any target ports use wildcards if they are enabled process = process or skipped addressing_mode = connection_properties.get('addressing_mode') for hba, ctls in process: for hba_channel, target_id, target_lun in ctls: target_lun = self.lun_for_addressing(target_lun, addressing_mode) LOG.debug('Scanning %(host)s (wwnn: %(wwnn)s, c: ' '%(channel)s, t: %(target)s, l: %(lun)s)', {'host': hba['host_device'], 'wwnn': hba['node_name'], 'channel': hba_channel, 'target': target_id, 'lun': target_lun}) self.echo_scsi_command( "/sys/class/scsi_host/%s/scan" % hba['host_device'], "%(c)s %(t)s %(l)s" % {'c': hba_channel, 't': target_id, 'l': target_lun}) @classmethod def get_fc_hbas(cls) -> list[dict[str, str]]: """Get the Fibre Channel HBA information from sysfs.""" hbas = [] for hostpath in glob.glob(f'{cls.FC_HOST_SYSFS_PATH}/*'): try: hba = {'ClassDevice': os.path.basename(hostpath), 'ClassDevicepath': os.path.realpath(hostpath)} for attribute in cls.HBA_ATTRIBUTES: with open(os.path.join(hostpath, attribute), 'rt') as f: hba[attribute] = f.read().strip() hbas.append(hba) except Exception as exc: LOG.warning('Could not read attributes for %(hp)s: %(exc)s', {'hp': hostpath, 'exc': exc}) return hbas def get_fc_hbas_info(self) -> list[dict[str, str]]: """Get Fibre Channel WWNs and device paths from the system, if any.""" hbas = self.get_fc_hbas() hbas_info = [] for hba in hbas: wwpn = hba['port_name'].replace('0x', '') wwnn = hba['node_name'].replace('0x', '') device_path = hba['ClassDevicepath'] device = hba['ClassDevice'] hbas_info.append({'port_name': wwpn, 'node_name': wwnn, 'host_device': device, 'device_path': device_path}) return hbas_info def get_fc_wwpns(self) -> list[str]: """Get Fibre Channel WWPNs from the system, if any.""" hbas = self.get_fc_hbas() wwpns = [] for hba in hbas: if hba['port_state'] == 'Online': wwpn = hba['port_name'].replace('0x', '') wwpns.append(wwpn) return wwpns def get_fc_wwnns(self) -> list[str]: """Get Fibre Channel WWNNs from the system, if any.""" hbas = self.get_fc_hbas() wwnns = [] for hba in hbas: if hba['port_state'] == 'Online': wwnn = hba['node_name'].replace('0x', '') wwnns.append(wwnn) return wwnns class LinuxFibreChannelS390X(LinuxFibreChannel): def get_fc_hbas_info(self): """Get Fibre Channel WWNs and device paths from the system, if any.""" hbas = self.get_fc_hbas() hbas_info = [] for hba in hbas: if hba['port_state'] == 'Online': wwpn = hba['port_name'].replace('0x', '') wwnn = hba['node_name'].replace('0x', '') device_path = hba['ClassDevicepath'] device = hba['ClassDevice'] hbas_info.append({'port_name': wwpn, 'node_name': wwnn, 'host_device': device, 'device_path': device_path}) return hbas_info def configure_scsi_device(self, device_number, target_wwn, lun):
"""Write the LUN to the port's unit_add attribute. If auto-discovery of Fibre-Channel target ports is disabled on s390 platforms, ports need to be added to the configuration. If auto-discovery of LUNs is disabled on s390 platforms luns need to be added to the configuration through the unit_add interface """ LOG.debug("Configure lun for s390: device_number=%(device_num)s " "target_wwn=%(target_wwn)s target_lun=%(target_lun)s", {'device_num': device_number, 'target_wwn': target_wwn, 'target_lun': lun}) filepath = ("/sys/bus/ccw/drivers/zfcp/%s/%s" % (device_number, target_wwn)) if not (os.path.exists(filepath)): zfcp_device_command = ("/sys/bus/ccw/drivers/zfcp/%s/port_rescan" % (device_number)) LOG.debug("port_rescan call for s390: %s", zfcp_device_command) try: self.echo_scsi_command(zfcp_device_command, "1") except putils.ProcessExecutionError as exc: LOG.warning("port_rescan call for s390 failed exit" " %(code)s, stderr %(stderr)s", {'code': exc.exit_code, 'stderr': exc.stderr}) zfcp_device_command = ("/sys/bus/ccw/drivers/zfcp/%s/%s/unit_add" % (device_number, target_wwn)) LOG.debug("unit_add call for s390 execute: %s", zfcp_device_command) try: self.echo_scsi_command(zfcp_device_command, lun) except putils.ProcessExecutionError as exc: LOG.warning("unit_add call for s390 failed exit %(code)s, " "stderr %(stderr)s", {'code': exc.exit_code, 'stderr': exc.stderr}) def deconfigure_scsi_device(self, device_number, target_wwn, lun): """Write the LUN to the port's unit_remove attribute. If auto-discovery of LUNs is disabled on s390 platforms luns need to be removed from the configuration through the unit_remove interface """ LOG.debug("Deconfigure lun for s390: " "device_number=%(device_num)s " "target_wwn=%(target_wwn)s target_lun=%(target_lun)s", {'device_num': device_number, 'target_wwn': target_wwn, 'target_lun': lun}) zfcp_device_command = ("/sys/bus/ccw/drivers/zfcp/%s/%s/unit_remove" % (device_number, target_wwn)) LOG.debug("unit_remove call for s390 execute: %s", zfcp_device_command) try: self.echo_scsi_command(zfcp_device_command, lun) except putils.ProcessExecutionError as exc: LOG.warning("unit_remove call for s390 failed exit %(code)s, " "stderr %(stderr)s", {'code': exc.exit_code, 'stderr': exc.stderr}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/linuxrbd.py0000664000175000017500000002007700000000000021421 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. 
"""Generic RBD connection utilities.""" from __future__ import annotations import io from typing import NoReturn, Optional from oslo_log import log as logging from os_brick import exception from os_brick.i18n import _ from os_brick import utils try: import rados import rbd except ImportError: rados = None rbd = None LOG = logging.getLogger(__name__) class RBDClient(object): def __init__(self, user: str, pool: str, *args, **kwargs): self.rbd_user = user self.rbd_pool = pool self.rados: 'rados.Rados' self.rbd: 'rbd.RBD' for attr in ['rbd_user', 'rbd_pool']: val = getattr(self, attr) if val is not None: setattr(self, attr, utils.convert_str(val)) # allow these to be overridden for testing self.rados = kwargs.get('rados', rados) self.rbd = kwargs.get('rbd', rbd) if self.rados is None: raise exception.InvalidParameterValue( err=_('rados module required')) if self.rbd is None: raise exception.InvalidParameterValue( err=_('rbd module required')) self.rbd_conf: str = kwargs.get('conffile', '/etc/ceph/ceph.conf') self.rbd_cluster_name: str = kwargs.get('rbd_cluster_name', 'ceph') self.client, self.ioctx = self.connect() def __enter__(self): return self def __exit__(self, type_, value, traceback): self.disconnect() def connect(self) -> tuple['rados.Rados', 'rados.Ioctx']: LOG.debug("opening connection to ceph cluster") client = self.rados.Rados(rados_id=self.rbd_user, clustername=self.rbd_cluster_name, conffile=self.rbd_conf) try: client.connect() ioctx = client.open_ioctx(self.rbd_pool) return client, ioctx except self.rados.Error: msg = _("Error connecting to ceph cluster.") LOG.exception(msg) # shutdown cannot raise an exception client.shutdown() raise exception.BrickException(message=msg) def disconnect(self) -> None: # closing an ioctx cannot raise an exception self.ioctx.close() self.client.shutdown() class RBDVolume(object): """Context manager for dealing with an existing rbd volume.""" def __init__(self, client: RBDClient, name: str, snapshot: Optional[str] = None, read_only: bool = False): if snapshot is not None: snapshot = utils.convert_str(snapshot) try: self.image = client.rbd.Image(client.ioctx, utils.convert_str(name), snapshot=snapshot, read_only=read_only) except client.rbd.Error: LOG.exception("error opening rbd image %s", name) client.disconnect() raise # Ceph provides rbd.so to cinder, but we can't # get volume name from rbd.Image, so, we record # name here, so other modules can easily get # volume name. self.name = name self.client = client def close(self) -> None: try: self.image.close() finally: self.client.disconnect() def __enter__(self) -> 'RBDVolume': return self def __exit__(self, type_, value, traceback) -> None: self.close() def __getattr__(self, attrib): return getattr(self.image, attrib) class RBDImageMetadata(object): """RBD image metadata to be used with RBDVolumeIOWrapper.""" def __init__(self, image: 'rbd.Image', pool: Optional[str], user: Optional[str], conf: Optional[str]): self.image = image self.pool = utils.convert_str(pool or '') self.user = utils.convert_str(user or '') self.conf = utils.convert_str(conf or '') class RBDVolumeIOWrapper(io.RawIOBase): """Enables LibRBD.Image objects to be treated as Python IO objects. Calling unimplemented interfaces will raise IOError. 
""" def __init__(self, rbd_volume: RBDImageMetadata): super(RBDVolumeIOWrapper, self).__init__() self._rbd_volume = rbd_volume self._offset = 0 def _inc_offset(self, length: int) -> None: self._offset += length @property def rbd_image(self) -> 'rbd.Image': return self._rbd_volume.image @property def rbd_user(self) -> str: return self._rbd_volume.user @property def rbd_pool(self) -> str: return self._rbd_volume.pool @property def rbd_conf(self) -> str: return self._rbd_volume.conf def read(self, length: Optional[int] = None) -> bytes: offset = self._offset total = int(self._rbd_volume.image.size()) # NOTE(dosaboy): posix files do not barf if you read beyond their # length (they just return nothing) but rbd images do so we need to # return empty string if we have reached the end of the image. if (offset >= total): return b'' if length is None: length = total if (offset + length) > total: length = total - offset try: data = self._rbd_volume.image.read(int(offset), int(length)) except Exception: LOG.exception('Exception encountered during image read') raise self._inc_offset(length) return data def write(self, data) -> None: self._rbd_volume.image.write(data, self._offset) self._inc_offset(len(data)) def seekable(self) -> bool: return True def seek(self, offset: int, whence: int = 0): if whence == 0: new_offset = offset elif whence == 1: new_offset = self._offset + offset elif whence == 2: new_offset = self._rbd_volume.image.size() new_offset += offset else: raise IOError(_("Invalid argument - whence=%s not supported") % (whence)) if (new_offset < 0): raise IOError(_("Invalid argument")) self._offset = new_offset def tell(self) -> int: return self._offset def flush(self) -> None: # Raise ValueError if already closed super().flush() # Don't fail on flush by calling it when underlying image is closed. try: self.rbd_image.require_not_closed() except rbd.InvalidArgument: # Image is closed LOG.warning("RBDVolumeIOWrapper's underlying image %s was closed " "directly (probably by the GC) instead of through the " "wrapper", self.rbd_image.name) return try: self.rbd_image.flush() except AttributeError: LOG.warning("flush() not supported in this version of librbd") def fileno(self) -> NoReturn: """RBD does not have support for fileno() so we raise IOError. Raising IOError is recommended way to notify caller that interface is not supported - see http://docs.python.org/2/library/io.html#io.IOBase """ raise IOError(_("fileno() not supported by RBD()")) def close(self) -> None: if not self.closed: # Can't set closed attribute ourselves, call parent to flush and # change it. super().close() self.rbd_image.close() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/linuxscsi.py0000664000175000017500000011163100000000000021610 0ustar00zuulzuul00000000000000# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Generic linux scsi subsystem and Multipath utilities. 
Note, this is not iSCSI. """ from __future__ import annotations import glob import os import re import time import typing from typing import Any, Iterable, Optional, Sequence from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from os_brick import constants from os_brick import exception from os_brick import executor from os_brick.privileged import rootwrap as priv_rootwrap from os_brick import utils LOG = logging.getLogger(__name__) CONF = cfg.CONF MULTIPATH_ERROR_REGEX = re.compile(r"\w{3} \d+ \d\d:\d\d:\d\d \|.*$") MULTIPATH_WWID_REGEX = re.compile(r"\((?P<wwid>.+)\)") MULTIPATH_DEVICE_ACTIONS = ['unchanged:', 'reject:', 'reload:', 'switchpg:', 'rename:', 'create:', 'resize:'] MULTIPATHD_RESIZE_TIMEOUT = 120 class LinuxSCSI(executor.Executor): # As found in drivers/scsi/scsi_lib.c WWN_TYPES = {'t10.': '1', 'eui.': '2', 'naa.': '3'} @staticmethod def lun_for_addressing(lun, addressing_mode=None) -> int: """Convert luns to values used by the system. How a LUN is codified depends on the standard being used by the storage array and the mode, which is unknown by the host. Addressing modes based on the standard: * SAM: - 64bit address * SAM-2: - Peripheral device addressing method (Code 00b) + Single level + Multi level - Flat space addressing method (Code 01b) - Logical unit addressing mode (Code 10b) - Extended logical unit addressing method (Code 11b) * SAM-3: Mostly same as SAM-2 but with some differences, like supporting addressing LUNs < 256 with flat address space. This means that the same LUN numbers could have different addressing values. Examples: * LUN 1: - SAM representation: 1 - SAM-2 peripheral: 1 - SAM-2 flat addressing: Invalid - SAM-3 flat addressing: 16385 * LUN 256 - SAM representation: 256 - SAM-2 peripheral: Not possible to represent - SAM-2 flat addressing: 16640 - SAM-3 flat addressing: 16640 This method makes the transformation from the numerical LUN value to the right addressing value based on the addressing_mode. Acceptable values are: - SAM: 64bit address with no translation - transparent: Same as SAM but used by drivers that want to use unsupported addressing modes by using the addressing mode instead of the LUN without being misleading (untested). - SAM2: Peripheral for LUN < 256 and flat for LUN >= 256. In SAM-2 flat cannot be used for 0-255 - SAM3-flat: Force flat-space addressing The default is SAM/transparent and nothing will be done with the LUNs.
""" mode = addressing_mode or constants.SCSI_ADDRESSING_SAM if mode not in constants.SCSI_ADDRESSING_MODES: raise exception.InvalidParameterValue('Invalid addressing_mode ' f'{addressing_mode}') if (mode == constants.SCSI_ADDRESSING_SAM3_FLAT or (mode == constants.SCSI_ADDRESSING_SAM2 and lun >= 256)): old_lun = lun lun += 16384 LOG.info('Transforming LUN value for addressing: %s -> %s', old_lun, lun) return lun def echo_scsi_command(self, path, content) -> None: """Used to echo strings to scsi subsystem.""" args = ["-a", path] kwargs = dict(process_input=content, run_as_root=True, root_helper=self._root_helper) self._execute('tee', *args, **kwargs) def get_name_from_path(self, path) -> Optional[str]: """Translates /dev/disk/by-path/ entry to /dev/sdX.""" name = os.path.realpath(path) if name.startswith("/dev/"): return name else: return None def remove_scsi_device(self, device: str, force: bool = False, exc=None, flush: bool = True) -> None: """Removes a scsi device based upon /dev/sdX name.""" path = "/sys/block/%s/device/delete" % device.replace("/dev/", "") if os.path.exists(path): exc = exception.ExceptionChainer() if exc is None else exc if flush: # flush any outstanding IO first with exc.context(force, 'Flushing %s failed', device): self.flush_device_io(device) LOG.debug("Remove SCSI device %(device)s with %(path)s", {'device': device, 'path': path}) with exc.context(force, 'Removing %s failed', device): self.echo_scsi_command(path, "1") def wait_for_volumes_removal(self, volumes_names: Iterable[str]) -> None: """Wait for device paths to be removed from the system.""" str_names = ', '.join(volumes_names) LOG.debug('Checking to see if SCSI volumes %s have been removed.', str_names) exist = ['/dev/' + volume_name for volume_name in volumes_names] # It can take up to 30 seconds to remove a SCSI device if the path # failed right before we start detaching, which is unlikely, but we # still shouldn't fail in that case. for i in range(61): exist = [path for path in exist if os.path.exists(path)] if not exist: LOG.debug("SCSI volumes %s have been removed.", str_names) return # Don't sleep on the last try since we are quitting if i < 60: time.sleep(0.5) # Log every 5 seconds if i % 10 == 0: LOG.debug('%s still exist.', ', '.join(exist)) raise exception.VolumePathNotRemoved(volume_path=exist) def get_device_info(self, device: str) -> dict[str, Optional[str]]: dev_info = {'device': device, 'host': None, 'channel': None, 'id': None, 'lun': None} # The input argument 'device' can be of 2 types: # (a) /dev/disk/by-path/XXX which is a symlink to /dev/sdX device # (b) /dev/sdX # If it's a symlink, get the /dev/sdX name first if os.path.islink(device): device = '/dev/' + os.readlink(device).split('/')[-1] # Else it's already a /dev/sdX device. 
# Then get it from lsscsi output (out, _err) = self._execute('lsscsi') if out: for line in out.strip().split('\n'): # The last column of lsscsi is device name if line.split()[-1] == device: # The first column of lsscsi is [H:C:T:L] hctl_info = line.split()[0].strip('[]').split(':') dev_info['host'] = hctl_info[0] dev_info['channel'] = hctl_info[1] dev_info['id'] = hctl_info[2] dev_info['lun'] = hctl_info[3] break LOG.debug('dev_info=%s', str(dev_info)) return dev_info def get_sysfs_wwn(self, device_names: list[str], mpath: Optional[str] = None) -> str: """Return the wwid from sysfs for any of the devices, in udev format.""" # If we have a multipath DM we know that it has found the WWN if mpath: # We have the WWN in /uuid even with friendly names, unlike /name try: with open('/sys/block/%s/dm/uuid' % mpath) as f: # Contents are mpath-WWN, so get the part we want wwid = f.read().strip()[6:] if wwid: # Check should not be needed, but just in case return wwid except Exception as exc: LOG.warning('Failed to read the DM uuid: %s', exc) wwid = self.get_sysfs_wwid(device_names) glob_str = '/dev/disk/by-id/scsi-' wwn_paths = glob.glob(glob_str + '*') # If we don't have multiple designators on page 0x83 if wwid and glob_str + wwid in wwn_paths: return wwid # If we have multiple designators use symlinks to find out the wwn device_names_set = set(device_names) for wwn_path in wwn_paths: try: if os.path.islink(wwn_path) and os.stat(wwn_path): path = os.path.realpath(wwn_path) if path.startswith('/dev/'): name = path[5:] # Symlink may point to the multipath dm if the attach # was too fast or we took long to check it. Check # devices belonging to the multipath DM. if name.startswith('dm-'): # Get the devices that belong to the DM slaves_path = '/sys/class/block/%s/slaves' % name dm_devs = os.listdir(slaves_path) # This is the right wwn_path if the devices we have # attached belong to the dm we followed if device_names_set.intersection(dm_devs): break # This is the right wwn_path if devices we have elif name in device_names_set: break except OSError: continue else: return '' return wwn_path[len(glob_str):] def get_sysfs_wwid(self, device_names: list[str]) -> str: """Return the wwid from sysfs for any of the devices, in udev format.""" for device_name in device_names: try: with open('/sys/block/%s/device/wwid' % device_name) as f: wwid = f.read().strip() except IOError: continue # The sysfs wwid has the wwn type in string format as a prefix, # but udev uses its numerical representation as returned by # scsi_id's page 0x83, so we need to map it udev_wwid = self.WWN_TYPES.get(wwid[:4], '8') + wwid[4:] return udev_wwid return '' def get_scsi_wwn(self, path: str) -> str: """Read the WWN from page 0x83 value for a SCSI device.""" (out, _err) = self._execute('/lib/udev/scsi_id', '--page', '0x83', '--whitelisted', path, run_as_root=True, root_helper=self._root_helper) return out.strip() @staticmethod def is_multipath_running(root_helper, execute=None) -> bool: try: if execute is None: execute = priv_rootwrap.execute cmd = ('multipathd', 'show', 'status') out, _err = execute(*cmd, run_as_root=True, root_helper=root_helper) # There was a bug in multipathd where it didn't return an error # code and just printed the error message in stdout. if out and out.startswith('error receiving packet'): return False except putils.ProcessExecutionError: return False return True def get_dm_name(self, dm: str) -> str: """Get the Device map name given the device name of the dm on sysfs. :param dm: Device map name as seen in sysfs.
ie: 'dm-0' :returns: String with the name, or empty string if not available. ie: '36e843b658476b7ed5bc1d4d10d9b1fde' """ try: with open('/sys/block/' + dm + '/dm/name') as f: return f.read().strip() except IOError: return '' def find_sysfs_multipath_dm(self, device_names: Iterable[str]) -> Optional[str]: """Find the dm device name given a list of device names. :param device_names: Iterable with device names, not paths. ie: ['sda'] :returns: String with the dm name or None if not found. ie: 'dm-0' """ glob_str = '/sys/block/%s/holders/dm-*' for dev_name in device_names: dms = glob.glob(glob_str % dev_name) if dms: __, device_name, __, dm = dms[0].rsplit('/', 3) return dm return None @staticmethod def requires_flush(path: str, path_used: Optional[str], was_multipath: bool) -> bool: """Check if a device needs to be flushed when detaching. A device representing a single path connection to a volume must only be flushed if it has been used directly by Nova or Cinder to write data. If the path has been used via a multipath DM or if the device was part of a multipath but a different single path was used for I/O (instead of the multipath) then we don't need to flush. """ # No used path happens on failed attaches, when we don't care about # individual flushes. if not path_used: return False path = os.path.realpath(path) path_used = os.path.realpath(path_used) # Need to flush this device if we used this specific path. We check # this before checking if it's multipath in case we don't detect it # being multipath correctly (as in bug #1897787). if path_used == path: return True # We flush individual path if Nova didn't use a multipath and we # replaced the symlink to a real device with a link to the decrypted # DM. We know we replaced it because it doesn't link to /dev/XYZ, # instead it maps to /dev/mapper/crypt-XYZ return not was_multipath and '/dev' != os.path.split(path_used)[0] def remove_connection(self, devices_names: Iterable[str], force: bool = False, exc=None, path_used: Optional[str] = None, was_multipath: bool = False) -> Optional[str]: """Remove LUNs and multipath associated with devices names. :param devices_names: Iterable with real device names ('sda', 'sdb') :param force: Whether to forcefully disconnect even if flush fails. :param exc: ExceptionChainer where to add exceptions if forcing :param path_used: What path was used by Nova/Cinder for I/O :param was_multipath: If the path used for I/O was a multipath :returns: Multipath device map name if found and not flushed """ if not devices_names: return None exc = exception.ExceptionChainer() if exc is None else exc multipath_dm = self.find_sysfs_multipath_dm(devices_names) LOG.debug('Removing %(type)s devices %(devices)s', {'type': 'multipathed' if multipath_dm else 'single pathed', 'devices': ', '.join(devices_names)}) multipath_name = multipath_dm and self.get_dm_name(multipath_dm) if multipath_name: with exc.context(force, 'Flushing %s failed', multipath_name): self.flush_multipath_device(multipath_name) multipath_name = None multipath_running = True else: multipath_running = self.is_multipath_running( root_helper=self._root_helper) for device_name in devices_names: dev_path = '/dev/' + device_name if multipath_running: # Recent multipathd doesn't remove path devices in time when # it receives multiple udev events in a short span, so here we # tell multipathd to remove the path device immediately.
# Even if this step fails, later removing an iscsi device # triggers a udev event and multipathd can remove the path # device based on the udev event self.multipath_del_path(dev_path) flush = self.requires_flush(dev_path, path_used, was_multipath) self.remove_scsi_device(dev_path, force, exc, flush) # Wait until the symlinks are removed with exc.context(force, 'Some devices remain from %s', devices_names): try: self.wait_for_volumes_removal(devices_names) finally: # Since we use /dev/disk/by-id/scsi- links to get the wwn we # must ensure they are always removed. self._remove_scsi_symlinks(devices_names) return multipath_name def _remove_scsi_symlinks(self, devices_names: Iterable[str]) -> None: devices = ['/dev/' + dev for dev in devices_names] links = glob.glob('/dev/disk/by-id/scsi-*') unlink = [] for link in links: try: if os.path.realpath(link) in devices: unlink.append(link) except OSError: # A race condition in Python's posixpath.realpath just occurred # so we can ignore it because the file was just removed between # a check if file exists and a call to os.readlink continue if unlink: priv_rootwrap.unlink_root(no_errors=True, *unlink) def flush_device_io(self, device: str) -> None: """This is used to flush any remaining IO in the buffers.""" if os.path.exists(device): try: # NOTE(geguileo): With 30% connection error rates flush can get # stuck, set timeout to prevent it from hanging here forever. # Retry twice after 20 and 40 seconds. LOG.debug("Flushing IO for device %s", device) self._execute('blockdev', '--flushbufs', device, run_as_root=True, attempts=3, timeout=300, interval=10, root_helper=self._root_helper) except putils.ProcessExecutionError as exc: LOG.warning("Failed to flush IO buffers prior to removing " "device: %(code)s", {'code': exc.exit_code}) raise def flush_multipath_device(self, device_map_name: str) -> None: LOG.debug("Flush multipath device %s", device_map_name) # NOTE(geguileo): With 30% connection error rates flush can get stuck, # set timeout to prevent it from hanging here forever. Retry twice # after 20 and 40 seconds. self._execute('multipath', '-f', device_map_name, run_as_root=True, attempts=3, timeout=300, interval=10, root_helper=self._root_helper) @utils.retry(exception.VolumeDeviceNotFound) def wait_for_path(self, volume_path: str) -> None: """Wait for a path to show up.""" LOG.debug("Checking to see if %s exists yet.", volume_path) if not os.path.exists(volume_path): LOG.debug("%(path)s doesn't exist yet.", {'path': volume_path}) raise exception.VolumeDeviceNotFound( device=volume_path) else: LOG.debug("%s has shown up.", volume_path) @utils.retry(exception.BlockDeviceReadOnly, retries=5) def wait_for_rw(self, wwn: str, device_path: str) -> None: """Wait for block device to be Read-Write.""" LOG.debug("Checking to see if %s is read-only.", device_path) out, info = self._execute('lsblk', '-o', 'NAME,RO', '-l', '-n') LOG.debug("lsblk output: %s", out) blkdevs = out.splitlines() for blkdev in blkdevs: # Entries might look like: # # "3624a93709a738ed78583fd120013902b (dm-1) 1" # # or # # "sdd 0" # # We are looking for the first and last part of them. For FC # multipath devices the name is in the format of '<WWN> (dm-#)' blkdev_parts = blkdev.split(' ') ro = blkdev_parts[-1] name = blkdev_parts[0] # We must validate that all pieces of the dm-# device are rw, # if some are still ro it can cause problems.
if wwn in name and int(ro) == 1: LOG.debug("Block device %s is read-only", device_path) self._execute('multipath', '-r', check_exit_code=[0, 1, 21], run_as_root=True, root_helper=self._root_helper) raise exception.BlockDeviceReadOnly( device=device_path) else: LOG.debug("Block device %s is not read-only.", device_path) def find_multipath_device_path(self, wwn: str) -> Optional[str]: """Look for the multipath device file for a volume WWN. Multipath devices can show up in several places on a linux system. 1) When multipath friendly names are ON: a device file will show up in /dev/disk/by-id/dm-uuid-mpath-<WWN> /dev/disk/by-id/dm-name-mpath<WWN> /dev/disk/by-id/scsi-mpath<WWN> /dev/mapper/mpath<WWN> 2) When multipath friendly names are OFF: /dev/disk/by-id/dm-uuid-mpath-<WWN> /dev/disk/by-id/scsi-<WWN> /dev/mapper/<WWN> """ LOG.info("Find Multipath device file for volume WWN %(wwn)s", {'wwn': wwn}) # First look for the common path wwn_dict = {'wwn': wwn} path = "/dev/disk/by-id/dm-uuid-mpath-%(wwn)s" % wwn_dict try: self.wait_for_path(path) return path except exception.VolumeDeviceNotFound: pass # for some reason the common path wasn't found # let's try the dev mapper path path = "/dev/mapper/%(wwn)s" % wwn_dict try: self.wait_for_path(path) return path except exception.VolumeDeviceNotFound: pass # couldn't find a path LOG.warning("couldn't find a valid multipath device path for " "%(wwn)s", wwn_dict) return None def find_multipath_device(self, device: str) -> Optional[dict[str, Any]]: """Discover multipath devices for an mpath device. This uses the slow multipath -l command to find a multipath device description, then screen scrapes the output to discover the multipath device name and its devices. """ mdev = None devices = [] out = None try: (out, _err) = self._execute('multipath', '-l', device, run_as_root=True, root_helper=self._root_helper) except putils.ProcessExecutionError as exc: LOG.warning("multipath call failed exit %(code)s", {'code': exc.exit_code}) raise exception.CommandExecutionFailed( cmd='multipath -l %s' % device) if out: lines_str = out.strip() lines = lines_str.split("\n") lines = [line for line in lines if not re.match(MULTIPATH_ERROR_REGEX, line) and len(line)] if lines: mdev_name = lines[0].split(" ")[0] if mdev_name in MULTIPATH_DEVICE_ACTIONS: mdev_name = lines[0].split(" ")[1] mdev = '/dev/mapper/%s' % mdev_name # Confirm that the device is present. try: os.stat(mdev) except OSError: LOG.warning("Couldn't find multipath device %s", mdev) return None wwid_search = MULTIPATH_WWID_REGEX.search(lines[0]) if wwid_search is not None: mdev_id = wwid_search.group('wwid') else: mdev_id = mdev_name LOG.debug("Found multipath device = %(mdev)s", {'mdev': mdev}) device_lines = lines[3:] for dev_line in device_lines: if dev_line.find("policy") != -1: continue dev_line = dev_line.lstrip(' |-`') dev_info = dev_line.split() address = dev_info[0].split(":") dev = {'device': '/dev/%s' % dev_info[1], 'host': address[0], 'channel': address[1], 'id': address[2], 'lun': address[3] } devices.append(dev) if mdev is not None: info = {"device": mdev, "id": mdev_id, "name": mdev_name, "devices": devices} return info return None def multipath_reconfigure(self) -> str: """Issue a multipathd reconfigure. When attachments come and go, the multipathd seems to get lost and not see the maps. This causes resize map to fail 100%. To overcome this we have to issue a reconfigure prior to resize map.
""" (out, _err) = self._execute('multipathd', 'reconfigure', run_as_root=True, root_helper=self._root_helper) return out def _multipath_resize_map(self, dm_path: str) -> str: cmd = ('multipathd', 'resize', 'map', dm_path) (out, _err) = self._execute(*cmd, run_as_root=True, root_helper=self._root_helper) if 'fail' in out or 'timeout' in out: raise putils.ProcessExecutionError( stdout=out, stderr=_err, exit_code=1, cmd=cmd) return out def multipath_resize_map(self, dm_path: str) -> None: """Issue a multipath resize map on device. This forces the multipath daemon to update it's size information a particular multipath device. :param dm_path: Real path of the DM device (eg: /dev/dm-5) """ # "multipathd reconfigure" is async since 0.6.1. While the # operation is in progress, "multipathd resize map" returns # "timeout". tstart = time.time() while True: try: self._multipath_resize_map(dm_path) break except putils.ProcessExecutionError as err: with excutils.save_and_reraise_exception(reraise=True) as ctx: elapsed = time.time() - tstart if 'timeout' in err.stdout and ( elapsed < MULTIPATHD_RESIZE_TIMEOUT): LOG.debug( "multipathd resize map timed out. " "Elapsed: %s, timeout: %s. Retrying...", elapsed, MULTIPATHD_RESIZE_TIMEOUT) ctx.reraise = False time.sleep(1) def extend_volume(self, volume_paths: list, use_multipath: bool = False) -> Optional[int]: """Signal the SCSI subsystem to test for volume resize. This function tries to signal the local system's kernel that an already attached volume might have been resized. """ # We need all paths up before extending the devices. # see Launchpad Bug: #2032177 for more details. LOG.debug("Checking paths are valid %s", volume_paths) for volume_path in volume_paths: if not utils.check_valid_device(self, volume_path): LOG.error("Path status is down for path %s", volume_path) raise exception.BrickException("All paths need to be up " "to extend the device.") LOG.debug("extend volume %s", volume_paths) for volume_path in volume_paths: device = self.get_device_info(volume_path) LOG.debug("Volume device info = %s", device) device_id = ("%(host)s:%(channel)s:%(id)s:%(lun)s" % {'host': device['host'], 'channel': device['channel'], 'id': device['id'], 'lun': device['lun']}) scsi_path = ("/sys/bus/scsi/drivers/sd/%(device_id)s" % {'device_id': device_id}) size = utils.get_device_size(self, volume_path) LOG.debug("Starting size: %s", size) # now issue the device rescan rescan_path = "%(scsi_path)s/rescan" % {'scsi_path': scsi_path} self.echo_scsi_command(rescan_path, "1") new_size = utils.get_device_size(self, volume_path) LOG.debug("volume size after scsi device rescan %s", new_size) scsi_wwn = self.get_scsi_wwn(volume_paths[0]) if use_multipath: mpath_device = self.find_multipath_device_path(scsi_wwn) if mpath_device: # Force a reconfigure so that resize works self.multipath_reconfigure() size = utils.get_device_size(self, mpath_device) LOG.info("mpath(%(device)s) current size %(size)s", {'device': mpath_device, 'size': size}) self.multipath_resize_map(os.path.realpath(mpath_device)) new_size = utils.get_device_size(self, mpath_device) LOG.info("mpath(%(device)s) new size %(size)s", {'device': mpath_device, 'size': new_size}) return new_size @typing.overload def process_lun_id(self, lun_ids: str | int) -> str | int: ... @typing.overload def process_lun_id(self, lun_ids: list[str | int]) -> list[str | int]: ... 
def process_lun_id(self, lun_ids: list[str | int] | str | int) -> \ list[str | int] | int | str: processed: list[str | int] | int | str if isinstance(lun_ids, list): processed = [] for x in lun_ids: x = self._format_lun_id(x) processed.append(x) else: processed = self._format_lun_id(lun_ids) return processed def _format_lun_id(self, lun_id: int | str) -> int | str: # make sure lun_id is an int lun_id = int(lun_id) if lun_id < 256: return lun_id else: return ("0x%04x%04x00000000" % (lun_id & 0xffff, lun_id >> 16 & 0xffff)) def get_hctl(self, session: str, lun: str) -> \ Optional[tuple[str, str, str, str]]: """Given an iSCSI session return the host, channel, target, and lun.""" glob_str = '/sys/class/iscsi_host/host*/device/session' + session paths = glob.glob(glob_str + '/target*') if paths: __, channel, target = os.path.split(paths[0])[1].split(':') # Check if we can get the host else: target = channel = '-' paths = glob.glob(glob_str) if not paths: LOG.debug('No hctl found on session %s with lun %s', session, lun) return None # Extract the host number from the path host = paths[0][26:paths[0].index('/', 26)] res = (host, channel, target, lun) LOG.debug('HCTL %s found on session %s with lun %s', res, session, lun) return res def device_name_by_hctl(self, session: str, hctl: Sequence) -> \ Optional[str]: """Find the device name given a session and the hctl. :param session: A string with the session number :param hctl: An iterable with the host, channel, target, and lun as passed to scan. ie: ('5', '-', '-', '0') """ if '-' in hctl: hctl = ['*' if x == '-' else x for x in hctl] path = ('/sys/class/scsi_host/host%(h)s/device/session%(s)s/target' '%(h)s:%(c)s:%(t)s/%(h)s:%(c)s:%(t)s:%(l)s/block/*' % {'h': hctl[0], 'c': hctl[1], 't': hctl[2], 'l': hctl[3], 's': session}) # Sort devices and return the first so we don't return a partition devices = sorted(glob.glob(path)) device = os.path.split(devices[0])[1] if devices else None LOG.debug('Searching for a device in session %s and hctl %s yielded: %s', session, hctl, device) return device def scan_iscsi(self, host, channel='-', target='-', lun='-') -> None: """Send an iSCSI scan request given the host and optionally the ctl.""" LOG.debug('Scanning host %(host)s c: %(channel)s, ' 't: %(target)s, l: %(lun)s)', {'host': host, 'channel': channel, 'target': target, 'lun': lun}) self.echo_scsi_command('/sys/class/scsi_host/host%s/scan' % host, '%(c)s %(t)s %(l)s' % {'c': channel, 't': target, 'l': lun}) def multipath_add_wwid(self, wwid: str) -> bool: """Add a wwid to the list of known multipath wwids. This has the effect of multipathd being willing to create a dm for a multipath even when there's only 1 device. """ out, err = self._execute('multipath', '-a', wwid, run_as_root=True, check_exit_code=False, root_helper=self._root_helper) return out.strip() == "wwid '" + wwid + "' added" def multipath_add_path(self, realpath: str) -> bool: """Add a path to multipathd for monitoring. This has the effect of multipathd checking an already checked device for multipath. Together with `multipath_add_wwid` we can create a multipath when there's only 1 path.
""" stdout, stderr = self._execute('multipathd', 'add', 'path', realpath, run_as_root=True, timeout=5, check_exit_code=False, root_helper=self._root_helper) return stdout.strip() == 'ok' def multipath_del_path(self, realpath: str) -> bool: """Remove a path from multipathd for monitoring.""" stdout, stderr = self._execute('multipathd', 'del', 'path', realpath, run_as_root=True, timeout=5, check_exit_code=False, root_helper=self._root_helper) return stdout.strip() == 'ok' @utils.retry((putils.ProcessExecutionError, exception.BrickException), retries=3) def multipath_del_map(self, mpath: str) -> None: """Stop monitoring a multipath given its device name (eg: dm-7). Method ensures that the multipath device mapper actually dissapears from sysfs. """ map_name = self.get_dm_name(mpath) if map_name: self._execute('multipathd', 'del', 'map', map_name, run_as_root=True, timeout=5, root_helper=self._root_helper) if map_name and self.get_dm_name(mpath): raise exception.BrickException("Multipath doesn't go away") LOG.debug('Multipath %s no longer present', mpath) def wait_for_mpath_device(self, mpath: str) -> None: """Wait for multipath device to become ready for I/O. mpath is the kernel name of the device (dm-*) which is the expected argument for multipath -C command. """ try: self._execute('multipath', '-C', mpath, attempts=CONF.os_brick.wait_mpath_device_attempts, interval=CONF.os_brick.wait_mpath_device_interval, run_as_root=True, root_helper=self._root_helper) except putils.ProcessExecutionError as exc: LOG.error("Failed to get mpath device %(mpath)s ready for " "I/O: %(except)s", {'mpath': mpath, 'except': exc}) raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/storpool_utils.py0000664000175000017500000001612500000000000022672 0ustar00zuulzuul00000000000000# Copyright (c) 2015 - 2024 StorPool # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import configparser import errno import http.client import json import os import pathlib import platform import socket import time from oslo_log import log as logging from os_brick import exception from os_brick.i18n import _ LOG = logging.getLogger(__name__) DEV_STORPOOL = pathlib.Path('/dev/storpool') DEV_STORPOOL_BYID = pathlib.Path('/dev/storpool-byid') STORPOOL_CONF_DEFAULTS = { "SP_API_HTTP_HOST": "127.0.0.1", "SP_API_HTTP_PORT": "81", } ENV_OVERRIDE = ["SP_AUTH_TOKEN", "SP_API_HTTP_HOST", "SP_API_HTTP_PORT"] def get_conf(section=None, use_env=True): """Load the StorPool configuration from files and the environment.""" config_path = pathlib.Path('/etc/storpool.conf') config_dir_path = pathlib.Path('/etc/storpool.conf.d') def _read_with_unnamed(a_parser, file): with open(file) as stream: a_parser.read_string('[UNNAMED_SECTION]\n' + stream.read()) def _get_env_overrides(): overrides = {} for override in ENV_OVERRIDE: if (value := os.environ.get(override)) is not None: overrides[override] = value return overrides parser = configparser.ConfigParser(strict=False, allow_no_value=True) parser.optionxform = str if not config_path.is_file(): message = "File %(file)s does not exist or not a file" raise exception.BrickException(message, file=config_path) _read_with_unnamed(parser, config_path) if config_dir_path.is_dir(): for path in sorted(config_dir_path.iterdir()): path_str = str(path) if path.is_file() \ and path_str.endswith(".conf") \ and not path_str.startswith("."): _read_with_unnamed(parser, path) if section is None: section = platform.node() conf = dict(STORPOOL_CONF_DEFAULTS) for sect in ['UNNAMED_SECTION', section]: if parser.has_section(sect): conf.update(dict(parser[sect])) if use_env: conf.update(_get_env_overrides()) return conf def os_to_sp_volume_name(prefix, volume_id): return "{pfx}--volume-{id}".format(pfx=prefix, id=volume_id) def os_to_sp_snapshot_name(prefix, type, snapshot_id, more=None): return "{pfx}--{t}--{m}--snapshot-{id}".format( pfx=prefix, t=type, m="none" if more is None else more, id=snapshot_id, ) class StorPoolAPIError(exception.BrickException): """Borrowed from `storpool.spapi`""" message = _("HTTP: %(status)s, %(name)s: %(desc)s") def __init__(self, status, json): self.status = status self.json = json self.name = json['error'].get('name', "") self.desc = json['error'].get('descr', "") self.transient = json['error'].get('transient', False) super(StorPoolAPIError, self).__init__( status=status, name=self.name, desc=self.desc) class StorPoolAPI: """A subset of the Python package `storpool` for a StorPool API client.""" def __init__(self, host, port, auth, timeout = 300, transient_retries = 5): self.url = f"{host}:{port}" self.auth_header = {'Authorization': f'Storpool v1:{auth}'} self.timeout = timeout self.transient_retries = transient_retries def _api_call(self, method, path, body = None): retry = 0 last_error = None while True: connection = None try: connection = http.client.HTTPConnection( self.url, timeout=self.timeout) if body: body = json.dumps(body) connection.request(method, path, body, self.auth_header) response = connection.getresponse() status, jres = response.status, json.load(response) if status == http.client.OK and 'error' not in jres: return jres['data'] last_error = StorPoolAPIError(status, jres) if not jres['error'].get('transient', False): raise last_error except (socket.error, http.client.HTTPException) as err: if not (isinstance(err, http.client.HTTPException) or err.errno in (errno.ECONNREFUSED, errno.ECONNRESET)): raise last_error = 
err finally: if connection: connection.close() if retry >= self.transient_retries: raise last_error time.sleep(2**retry) retry += 1 def disks_list(self): return self._api_call('GET', '/ctrl/1.0/DisksList') def volume_templates_list(self): return self._api_call('GET', '/ctrl/1.0/VolumeTemplatesList') def volumes_reassign(self, data): self._api_call('POST', '/ctrl/1.0/MultiCluster/VolumesReassign', data) def volumes_reassign_wait(self, data): self._api_call( 'POST', '/ctrl/1.0/MultiCluster/VolumesReassignWait', data) def volume(self, volume): return self._api_call( 'GET', f'/ctrl/1.0/MultiCluster/Volume/{volume}') def volume_create(self, data): self._api_call('POST', '/ctrl/1.0/MultiCluster/VolumeCreate', data) def volume_get_info(self, volume): return self._api_call( 'GET', f'/ctrl/1.0/MultiCluster/VolumeGetInfo/{volume}') def volume_update(self, volume, data): self._api_call( 'POST', f'/ctrl/1.0/MultiCluster/VolumeUpdate/{volume}', data) def volume_revert(self, volume, data): self._api_call( 'POST', f'/ctrl/1.0/MultiCluster/VolumeRevert/{volume}', data) def volume_delete(self, volume): self._api_call('POST', f'/ctrl/1.0/MultiCluster/VolumeDelete/{volume}') def volumes_list(self): return self._api_call('GET', '/ctrl/1.0/MultiCluster/VolumesList') def snapshot_create(self, volume, data): self._api_call( 'POST', f'/ctrl/1.0/MultiCluster/VolumeSnapshot/{volume}', data) def snapshot_update(self, snapshot, data): self._api_call( 'POST', f'/ctrl/1.0/SnapshotUpdate/{snapshot}', data) def snapshot_delete(self, snapshot): self._api_call( 'POST', f'/ctrl/1.0/MultiCluster/SnapshotDelete/{snapshot}') def get_iscsi_config(self): return self._api_call('GET', '/ctrl/1.0/iSCSIConfig') def post_iscsi_config(self, data): return self._api_call('POST', '/ctrl/1.0/iSCSIConfig', data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/utils.py0000664000175000017500000000346000000000000020727 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import os from typing import Generator from oslo_concurrency import lockutils from oslo_concurrency import processutils as putils def check_manual_scan() -> bool: if os.name == 'nt': return False try: putils.execute('grep', '-F', 'node.session.scan', '/sbin/iscsiadm') except putils.ProcessExecutionError: return False return True ISCSI_SUPPORTS_MANUAL_SCAN = check_manual_scan() @contextlib.contextmanager def guard_connection(device: dict) -> Generator: """Context Manager handling locks for attach/detach operations. In Cinder microversion 3.69 the shared_targets field for volumes are tristate: - True ==> Lock if iSCSI initiator doesn't support manual scans - False ==> Never lock. - None ==> Always lock. 
""" shared = device.get('shared_targets', False) if (shared is not None and ISCSI_SUPPORTS_MANUAL_SCAN) or shared is False: yield else: # Cinder passes an OVO, but Nova passes a dictionary, so we use dict # key access that works with both. with lockutils.lock(device['service_uuid'], 'os-brick-', external=True): yield ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.611759 os_brick-6.11.0/os_brick/initiator/windows/0000775000175000017500000000000000000000000020704 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/windows/__init__.py0000664000175000017500000000000000000000000023003 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/windows/base.py0000664000175000017500000001116000000000000022167 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import warnings from os_win import utilsfactory from oslo_concurrency import processutils as putils from oslo_log import log as logging from os_brick import exception from os_brick.i18n import _ from os_brick import initiator from os_brick.initiator import initiator_connector from os_brick import utils LOG = logging.getLogger(__name__) class BaseWindowsConnector(initiator_connector.InitiatorConnector): platform = initiator.PLATFORM_ALL os_type = initiator.OS_TYPE_WINDOWS DEFAULT_DEVICE_SCAN_INTERVAL = 2 def __init__(self, root_helper=None, *args, **kwargs): warnings.warn('Support for Windows OS has been deprecated.', category=DeprecationWarning, stacklevel=2) kwargs['executor'] = kwargs.get('executor') or putils.execute super(BaseWindowsConnector, self).__init__(root_helper, *args, **kwargs) self.device_scan_interval = kwargs.pop( 'device_scan_interval', self.DEFAULT_DEVICE_SCAN_INTERVAL) self._diskutils = utilsfactory.get_diskutils() @staticmethod def check_multipath_support(enforce_multipath): hostutils = utilsfactory.get_hostutils() mpio_enabled = hostutils.check_server_feature( hostutils.FEATURE_MPIO) if not mpio_enabled: err_msg = _("Using multipath connections for iSCSI and FC disks " "requires the Multipath IO Windows feature to be " "enabled. 
MPIO must be configured to claim such " "devices.") LOG.error(err_msg) if enforce_multipath: raise exception.BrickException(err_msg) return False return True @staticmethod def get_connector_properties(*args, **kwargs): multipath = kwargs['multipath'] enforce_multipath = kwargs['enforce_multipath'] props = {} props['multipath'] = ( multipath and BaseWindowsConnector.check_multipath_support(enforce_multipath)) return props def _get_scsi_wwn(self, device_number): # NOTE(lpetrut): The Linux connectors use scsi_id to retrieve the # disk unique id, which prepends the identifier type to the unique id # retrieved from the page 83 SCSI inquiry data. We'll do the same # to remain consistent. disk_uid, uid_type = self._diskutils.get_disk_uid_and_uid_type( device_number) scsi_wwn = '%s%s' % (uid_type, disk_uid) return scsi_wwn def check_valid_device(self, path, *args, **kwargs): try: with open(path, 'r') as dev: dev.read(1) except IOError: LOG.exception( "Failed to access the device on the path " "%(path)s", {"path": path}) return False return True def get_all_available_volumes(self): # TODO(lpetrut): query for disks based on the protocol used. return [] def _check_device_paths(self, device_paths): if len(device_paths) > 1: err_msg = _("Multiple volume paths were found: %s. This can " "occur if multipath is used and MPIO is not " "properly configured, thus not claiming the device " "paths. This issue must be addressed urgently as " "it can lead to data corruption.") raise exception.BrickException(err_msg % device_paths) @utils.trace def extend_volume(self, connection_properties): volume_paths = self.get_volume_paths(connection_properties) if not volume_paths: err_msg = _("Could not find the disk. Extend failed.") raise exception.NotFound(err_msg) device_path = volume_paths[0] device_number = self._diskutils.get_device_number_from_device_name( device_path) self._diskutils.refresh_disk(device_number) def get_search_path(self): return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/windows/fibre_channel.py0000664000175000017500000002035300000000000024040 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
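# NOTE: illustrative only (example value assumed, not from os-win docs): the
# WWN assembled by BaseWindowsConnector._get_scsi_wwn above concatenates the
# page 0x83 identifier type and the unique id, so an NAA (type 3) identifier
# yields something like '3600a0980...', matching the udev-style ids the
# Linux connectors build from /lib/udev/scsi_id.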
import collections import time from os_win import exceptions as os_win_exc from os_win import utilsfactory from oslo_log import log as logging from os_brick import exception from os_brick.i18n import _ from os_brick.initiator.windows import base as win_conn_base from os_brick import utils LOG = logging.getLogger(__name__) class WindowsFCConnector(win_conn_base.BaseWindowsConnector): def __init__(self, *args, **kwargs): super(WindowsFCConnector, self).__init__(*args, **kwargs) self.use_multipath = kwargs.get('use_multipath', False) self._fc_utils = utilsfactory.get_fc_utils() @staticmethod def get_connector_properties(*args, **kwargs): props = {} fc_utils = utilsfactory.get_fc_utils() fc_utils.refresh_hba_configuration() fc_hba_ports = fc_utils.get_fc_hba_ports() if fc_hba_ports: wwnns = [] wwpns = [] for port in fc_hba_ports: wwnns.append(port['node_name']) wwpns.append(port['port_name']) props['wwpns'] = wwpns props['wwnns'] = list(set(wwnns)) return props @utils.trace def connect_volume(self, connection_properties): volume_paths = self.get_volume_paths(connection_properties) if not volume_paths: raise exception.NoFibreChannelVolumeDeviceFound() device_path = volume_paths[0] device_number = self._diskutils.get_device_number_from_device_name( device_path) scsi_wwn = self._get_scsi_wwn(device_number) device_info = {'type': 'block', 'path': device_path, 'number': device_number, 'scsi_wwn': scsi_wwn} return device_info @utils.trace def get_volume_paths(self, connection_properties): # Returns a list containing at most one disk path such as # \\.\PhysicalDrive4. # # If multipath is used and the MPIO service is properly configured # to claim the disks, we'll still get a single device path, having # the same format, which will be used for all the IO operations. for attempt_num in range(self.device_scan_attempts): disk_paths = set() if attempt_num: time.sleep(self.device_scan_interval) self._diskutils.rescan_disks() volume_mappings = self._get_fc_volume_mappings( connection_properties) LOG.debug("Retrieved volume mappings %(vol_mappings)s " "for volume %(conn_props)s", dict(vol_mappings=volume_mappings, conn_props=connection_properties)) for mapping in volume_mappings: device_name = mapping['device_name'] if device_name: disk_paths.add(device_name) if not disk_paths and volume_mappings: fcp_lun = volume_mappings[0]['fcp_lun'] try: disk_paths = self._get_disk_paths_by_scsi_id( connection_properties, fcp_lun) disk_paths = set(disk_paths or []) except os_win_exc.OSWinException as ex: LOG.debug("Failed to retrieve disk paths by SCSI ID. " "Exception: %s", ex) if not disk_paths: LOG.debug("No disk path retrieved yet.") continue if len(disk_paths) > 1: LOG.debug("Multiple disk paths retrieved: %s This may happen " "if MPIO did not claim them yet.", disk_paths) continue dev_num = self._diskutils.get_device_number_from_device_name( list(disk_paths)[0]) if self.use_multipath and not self._diskutils.is_mpio_disk( dev_num): LOG.debug("Multipath was requested but the disk %s was not " "claimed yet by the MPIO service.", dev_num) continue return list(disk_paths) return [] def _get_fc_volume_mappings(self, connection_properties): # Note(lpetrut): All the WWNs returned by os-win are upper case. 
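# ...hence we normalize the requested target WWNs to upper case below
# before comparing them against the target mappings reported by the HBA.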
target_wwpns = [wwpn.upper() for wwpn in connection_properties['target_wwn']] target_lun = connection_properties['target_lun'] volume_mappings = [] hba_mappings = self._get_fc_hba_mappings() for node_name in hba_mappings: target_mappings = self._fc_utils.get_fc_target_mappings(node_name) for mapping in target_mappings: if (mapping['port_name'] in target_wwpns and mapping['lun'] == target_lun): volume_mappings.append(mapping) return volume_mappings def _get_fc_hba_mappings(self): mappings = collections.defaultdict(list) fc_hba_ports = self._fc_utils.get_fc_hba_ports() for port in fc_hba_ports: mappings[port['node_name']].append(port['port_name']) return mappings def _get_disk_paths_by_scsi_id(self, connection_properties, fcp_lun): for local_port_wwn, remote_port_wwns in connection_properties[ 'initiator_target_map'].items(): for remote_port_wwn in remote_port_wwns: try: dev_nums = self._get_dev_nums_by_scsi_id( local_port_wwn, remote_port_wwn, fcp_lun) # This may raise a DiskNotFound exception if the disks # are meanwhile claimed by the MPIO service. disk_paths = [ self._diskutils.get_device_name_by_device_number( dev_num) for dev_num in dev_nums] return disk_paths except os_win_exc.FCException as ex: LOG.debug("Failed to retrieve volume paths by SCSI id. " "Exception: %s", ex) continue return [] def _get_dev_nums_by_scsi_id(self, local_port_wwn, remote_port_wwn, fcp_lun): LOG.debug("Fetching SCSI Unique ID for FCP lun %(fcp_lun)s. " "Port WWN: %(local_port_wwn)s. " "Remote port WWN: %(remote_port_wwn)s.", dict(fcp_lun=fcp_lun, local_port_wwn=local_port_wwn, remote_port_wwn=remote_port_wwn)) local_hba_wwn = self._get_fc_hba_wwn_for_port(local_port_wwn) # This will return the SCSI identifiers in the order of precedence # used by Windows. identifiers = self._fc_utils.get_scsi_device_identifiers( local_hba_wwn, local_port_wwn, remote_port_wwn, fcp_lun) if identifiers: identifier = identifiers[0] dev_nums = self._diskutils.get_disk_numbers_by_unique_id( unique_id=identifier['id'], unique_id_format=identifier['type']) return dev_nums return [] def _get_fc_hba_wwn_for_port(self, port_wwn): fc_hba_ports = self._fc_utils.get_fc_hba_ports() for port in fc_hba_ports: if port_wwn.upper() == port['port_name']: return port['node_name'] err_msg = _("Could not find any FC HBA port " "having WWN '%s'.") % port_wwn raise exception.NotFound(err_msg) @utils.trace def disconnect_volume(self, connection_properties, device_info=None, force=False, ignore_errors=False): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/windows/iscsi.py0000664000175000017500000001634700000000000022403 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
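# NOTE: illustrative example only, not part of os-brick. The iSCSI connector
# below expands the connection properties into one login attempt per
# (initiator, portal, iqn, lun) combination (see _get_all_paths). A minimal
# sketch of that expansion, assuming one hypothetical initiator HBA name and
# two portals exposing the same target:
#
#     initiator_list = ['ROOT\\ISCSIPRT\\0000_0']
#     targets = [('192.168.1.10:3260', 'iqn.2004-04.com.example:vol1', 1),
#                ('192.168.1.11:3260', 'iqn.2004-04.com.example:vol1', 1)]
#     paths = [(init, portal, iqn, lun)
#              for portal, iqn, lun in targets
#              for init in initiator_list]
#     # -> 2 paths; with multipath enabled a session is logged in for each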
from os_win import exceptions as os_win_exc from os_win import utilsfactory from oslo_log import log as logging from os_brick import exception from os_brick.i18n import _ from os_brick.initiator.connectors import base_iscsi from os_brick.initiator.windows import base as win_conn_base from os_brick import utils LOG = logging.getLogger(__name__) class WindowsISCSIConnector(win_conn_base.BaseWindowsConnector, base_iscsi.BaseISCSIConnector): def __init__(self, *args, **kwargs): super(WindowsISCSIConnector, self).__init__(*args, **kwargs) self.use_multipath = kwargs.pop('use_multipath', False) self.initiator_list = kwargs.pop('initiator_list', []) self._iscsi_utils = utilsfactory.get_iscsi_initiator_utils() self.validate_initiators() def validate_initiators(self): """Validates the list of requested initiator HBAs Validates the list of requested initiator HBAs to be used when establishing iSCSI sessions. """ valid_initiator_list = True if not self.initiator_list: LOG.info("No iSCSI initiator was explicitly requested. " "The Microsoft iSCSI initiator will choose the " "initiator when establishing sessions.") else: available_initiators = self._iscsi_utils.get_iscsi_initiators() for initiator in self.initiator_list: if initiator not in available_initiators: LOG.warning("The requested initiator %(req_initiator)s " "is not in the list of available initiators: " "%(avail_initiators)s.", dict(req_initiator=initiator, avail_initiators=available_initiators)) valid_initiator_list = False return valid_initiator_list def get_initiator(self): """Returns the iSCSI initiator node name.""" return self._iscsi_utils.get_iscsi_initiator() @staticmethod def get_connector_properties(*args, **kwargs): iscsi_utils = utilsfactory.get_iscsi_initiator_utils() initiator = iscsi_utils.get_iscsi_initiator() return dict(initiator=initiator) def _get_all_paths(self, connection_properties): initiator_list = self.initiator_list or [None] all_targets = self._get_all_targets(connection_properties) paths = [(initiator_name, target_portal, target_iqn, target_lun) for target_portal, target_iqn, target_lun in all_targets for initiator_name in initiator_list] return paths @utils.trace def connect_volume(self, connection_properties): connected_target_mappings = set() volume_connected = False for (initiator_name, target_portal, target_iqn, target_lun) in self._get_all_paths(connection_properties): try: LOG.info("Attempting to establish an iSCSI session to " "target %(target_iqn)s on portal %(target_portal)s " "accessing LUN %(target_lun)s using initiator " "%(initiator_name)s.", dict(target_portal=target_portal, target_iqn=target_iqn, target_lun=target_lun, initiator_name=initiator_name)) self._iscsi_utils.login_storage_target( target_lun=target_lun, target_iqn=target_iqn, target_portal=target_portal, auth_username=connection_properties.get('auth_username'), auth_password=connection_properties.get('auth_password'), mpio_enabled=self.use_multipath, initiator_name=initiator_name, ensure_lun_available=False) connected_target_mappings.add((target_iqn, target_lun)) if not self.use_multipath: break except os_win_exc.OSWinException: LOG.exception("Could not establish the iSCSI session.") for target_iqn, target_lun in connected_target_mappings: try: (device_number, device_path) = self._iscsi_utils.get_device_number_and_path( target_iqn, target_lun, retry_attempts=self.device_scan_attempts, retry_interval=self.device_scan_interval, rescan_disks=True, ensure_mpio_claimed=self.use_multipath) volume_connected = True except 
os_win_exc.OSWinException: LOG.exception("Could not retrieve device path for target " "%(target_iqn)s and lun %(target_lun)s.", dict(target_iqn=target_iqn, target_lun=target_lun)) if not volume_connected: raise exception.BrickException( _("Could not connect volume %s.") % connection_properties) scsi_wwn = self._get_scsi_wwn(device_number) device_info = {'type': 'block', 'path': device_path, 'number': device_number, 'scsi_wwn': scsi_wwn} return device_info @utils.trace def disconnect_volume(self, connection_properties, device_info=None, force=False, ignore_errors=False): # We want to refresh the cached information first. self._diskutils.rescan_disks() for (target_portal, target_iqn, target_lun) in self._get_all_targets(connection_properties): luns = self._iscsi_utils.get_target_luns(target_iqn) # We disconnect the target only if it does not expose other # luns which may be in use. if not luns or luns == [target_lun]: self._iscsi_utils.logout_storage_target(target_iqn) @utils.trace def get_volume_paths(self, connection_properties): device_paths = set() for (target_portal, target_iqn, target_lun) in self._get_all_targets(connection_properties): (device_number, device_path) = self._iscsi_utils.get_device_number_and_path( target_iqn, target_lun, ensure_mpio_claimed=self.use_multipath) if device_path: device_paths.add(device_path) self._check_device_paths(device_paths) return list(device_paths) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/windows/rbd.py0000664000175000017500000001510700000000000022031 0ustar00zuulzuul00000000000000# Copyright 2020 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ctypes import errno import json from oslo_concurrency import processutils from oslo_log import log as logging from oslo_service import loopingcall from os_brick import exception from os_brick.i18n import _ from os_brick.initiator.connectors import base_rbd from os_brick.initiator.windows import base as win_conn_base from os_brick import utils LOG = logging.getLogger(__name__) class WindowsRBDConnector(base_rbd.RBDConnectorMixin, win_conn_base.BaseWindowsConnector): """Connector class to attach/detach RBD volumes. The Windows RBD connector is very similar to the Linux one. There are a few main differences though: * the Ceph python bindings are not available on Windows yet, so we'll always do a local mount. Besides, Hyper-V cannot use librbd, so we'll need to do a local mount anyway. * The device names aren't handled in the same way. On Windows, disk names such as "\\\\.\\PhysicalDrive1" are provided by the OS and cannot be explicitly requested.
""" def __init__(self, *args, **kwargs): super(WindowsRBDConnector, self).__init__(*args, **kwargs) self._ensure_rbd_available() def _check_rbd(self): cmd = ['where.exe', 'rbd'] try: self._execute(*cmd) return True except processutils.ProcessExecutionError: LOG.warning("rbd.exe is not available.") return False def _ensure_rbd_available(self): if not self._check_rbd(): msg = _("rbd.exe is not available.") LOG.error(msg) raise exception.BrickException(msg) def get_volume_paths(self, connection_properties): return [self.get_device_name(connection_properties)] def _show_rbd_mapping(self, connection_properties): # TODO(lpetrut): consider using "rbd device show" if/when # it becomes available. cmd = ['rbd-wnbd', 'show', connection_properties['name'], '--format', 'json'] try: out, err = self._execute(*cmd) return json.loads(out) except processutils.ProcessExecutionError as ex: if abs(ctypes.c_int32(ex.exit_code).value) == errno.ENOENT: LOG.debug("Couldn't find RBD mapping: %s", connection_properties['name']) return raise except json.decoder.JSONDecodeError: msg = _("Could not get rbd mappping.") LOG.exception(msg) raise exception.BrickException(msg) def get_device_name(self, connection_properties, expect=True): mapping = self._show_rbd_mapping(connection_properties) if mapping: dev_num = mapping['disk_number'] LOG.debug( "Located RBD mapping: %(image)s. " "Disk number: %(disk_number)s.", dict(image=connection_properties['name'], disk_number=dev_num)) return self._diskutils.get_device_name_by_device_number(dev_num) elif expect: msg = _("The specified RBD image is not mounted: %s") raise exception.VolumeDeviceNotFound( msg % connection_properties['name']) def _wait_for_volume(self, connection_properties): """Wait for the specified volume to become accessible.""" attempt = 0 dev_path = None def _check_rbd_device(): rbd_dev_path = self.get_device_name( connection_properties, expect=False) if rbd_dev_path: try: # Under high load, it can take a second before the disk # becomes accessible. with open(rbd_dev_path, 'rb'): pass nonlocal dev_path dev_path = rbd_dev_path raise loopingcall.LoopingCallDone() except FileNotFoundError: LOG.debug("The RBD image %(image)s mapped to local device " "%(dev)s isn't available yet.", {'image': connection_properties['name'], 'dev': rbd_dev_path}) nonlocal attempt attempt += 1 if attempt >= self.device_scan_attempts: msg = _("The mounted RBD image isn't available: %s") raise exception.VolumeDeviceNotFound( msg % connection_properties['name']) timer = loopingcall.FixedIntervalLoopingCall(_check_rbd_device) timer.start(interval=self.device_scan_interval).wait() return dev_path @utils.trace def connect_volume(self, connection_properties): rbd_dev_path = self.get_device_name(connection_properties, expect=False) if not rbd_dev_path: cmd = ['rbd', 'device', 'map', connection_properties['name']] cmd += self._get_rbd_args(connection_properties) self._execute(*cmd) rbd_dev_path = self._wait_for_volume(connection_properties) else: LOG.debug('The RBD image %(image)s is already mapped to local ' 'device %(dev)s', {'image': connection_properties['name'], 'dev': rbd_dev_path}) dev_num = self._diskutils.get_device_number_from_device_name( rbd_dev_path) # TODO(lpetrut): remove this once wnbd honors the SAN policy setting. 
self._diskutils.set_disk_offline(dev_num) return {'path': rbd_dev_path, 'type': 'block'} @utils.trace def disconnect_volume(self, connection_properties, device_info=None, force=False, ignore_errors=False): cmd = ['rbd', 'device', 'unmap', connection_properties['name']] cmd += self._get_rbd_args(connection_properties) if force: cmd += ["-o", "hard-disconnect"] self._execute(*cmd) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/initiator/windows/smbfs.py0000664000175000017500000001211300000000000022366 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from os_win import utilsfactory from os_brick.initiator.windows import base as win_conn_base from os_brick.remotefs import windows_remotefs as remotefs from os_brick import utils # The Windows SMBFS connector expects to receive VHD/x images stored on SMB # shares, exposed by the Cinder SMBFS driver. class WindowsSMBFSConnector(win_conn_base.BaseWindowsConnector): def __init__(self, *args, **kwargs): super(WindowsSMBFSConnector, self).__init__(*args, **kwargs) # If this flag is set, we use the local paths in case of local # shares. This is in fact mandatory in some cases, for example # for the Hyper-C scenario. self._local_path_for_loopback = kwargs.get('local_path_for_loopback', True) self._expect_raw_disk = kwargs.get('expect_raw_disk', False) self._remotefsclient = remotefs.WindowsRemoteFsClient( mount_type='smbfs', *args, **kwargs) self._smbutils = utilsfactory.get_smbutils() self._vhdutils = utilsfactory.get_vhdutils() self._diskutils = utilsfactory.get_diskutils() @staticmethod def get_connector_properties(*args, **kwargs): # No connector properties updates in this case. return {} @utils.trace def connect_volume(self, connection_properties): self.ensure_share_mounted(connection_properties) # This will be a virtual disk image path. disk_path = self._get_disk_path(connection_properties) if self._expect_raw_disk: # The caller expects a directly accessible raw disk. We'll # mount the image and bring the new disk offline, which will # allow direct IO, while ensuring that any partition residing # on it will be unmounted. read_only = connection_properties.get('access_mode') == 'ro' self._vhdutils.attach_virtual_disk(disk_path, read_only=read_only) raw_disk_path = self._vhdutils.get_virtual_disk_physical_path( disk_path) dev_num = self._diskutils.get_device_number_from_device_name( raw_disk_path) self._diskutils.set_disk_offline(dev_num) else: raw_disk_path = None device_info = {'type': 'file', 'path': raw_disk_path if self._expect_raw_disk else disk_path} return device_info @utils.trace def disconnect_volume(self, connection_properties, device_info=None, force=False, ignore_errors=False): export_path = self._get_export_path(connection_properties) disk_path = self._get_disk_path(connection_properties) # The detach method will silently continue if the disk is # not attached.
self._vhdutils.detach_virtual_disk(disk_path) self._remotefsclient.unmount(export_path) def _get_export_path(self, connection_properties): return connection_properties['export'].replace('/', '\\') def _get_disk_path(self, connection_properties): # This is expected to be the share address, as an UNC path. export_path = self._get_export_path(connection_properties) mount_base = self._remotefsclient.get_mount_base() use_local_path = (self._local_path_for_loopback and self._smbutils.is_local_share(export_path)) disk_dir = export_path if mount_base: # This will be a symlink pointing to either the share # path directly or to the local share path, if requested # and available. disk_dir = self._remotefsclient.get_mount_point( export_path) elif use_local_path: disk_dir = self._remotefsclient.get_local_share_path(export_path) disk_name = connection_properties['name'] disk_path = os.path.join(disk_dir, disk_name) return disk_path def get_search_path(self): return self._remotefsclient.get_mount_base() @utils.trace def get_volume_paths(self, connection_properties): return [self._get_disk_path(connection_properties)] def ensure_share_mounted(self, connection_properties): export_path = self._get_export_path(connection_properties) mount_options = connection_properties.get('options') self._remotefsclient.mount(export_path, mount_options) def extend_volume(self, connection_properties): raise NotImplementedError ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.611759 os_brick-6.11.0/os_brick/local_dev/0000775000175000017500000000000000000000000017140 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/local_dev/__init__.py0000664000175000017500000000000000000000000021237 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/local_dev/lvm.py0000664000175000017500000007573300000000000020327 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
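# NOTE: illustrative example only, not part of os-brick. Typical usage of
# the LVM helper defined below, assuming an existing volume group named
# 'stack-vg' and 'sudo' as the root helper:
#
#     from os_brick.local_dev.lvm import LVM
#
#     lvm = LVM('stack-vg', root_helper='sudo', lvm_type='thin')
#     lvm.create_volume('volume-0001', '1g', lv_type='thin')
#     lvm.extend_volume('volume-0001', '2g')
#     print(lvm.get_volume('volume-0001'))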
"""LVM class for performing LVM operations.""" from __future__ import annotations import math import os import re from typing import Any, Callable, Optional from oslo_concurrency import processutils as putils from oslo_log import log as logging from oslo_utils import excutils from os_brick import exception from os_brick import executor from os_brick.privileged import rootwrap as priv_rootwrap from os_brick import utils LOG = logging.getLogger(__name__) class LVM(executor.Executor): """LVM object to enable various LVM related operations.""" LVM_CMD_PREFIX = ['env', 'LC_ALL=C'] def __init__(self, vg_name: str, root_helper: str, create_vg: bool = False, physical_volumes: Optional[list] = None, lvm_type: Optional[str] = 'default', executor: Optional[Callable] = None, lvm_conf=None, suppress_fd_warn: Optional[bool] = False): """Initialize the LVM object. The LVM object is based on an LVM VolumeGroup, one instantiation for each VolumeGroup you have/use. :param vg_name: Name of existing VG or VG to create :param root_helper: Execution root_helper method to use :param create_vg: Indicates the VG doesn't exist and we want to create it :param physical_volumes: List of PVs to build VG on :param lvm_type: VG and Volume type (default, or thin) :param executor: Execute method to use, None uses oslo_concurrency.processutils :param suppress_fd_warn: Add suppress FD Warn to LVM env """ super(LVM, self).__init__(execute=executor, root_helper=root_helper) self.vg_name = vg_name self.pv_list = [] self.vg_size = 0.0 self.vg_free_space = 0.0 self.vg_lv_count = 0 self.vg_uuid = None self.vg_thin_pool = None self.vg_thin_pool_size = 0.0 self.vg_thin_pool_free_space = 0.0 self.vg_provisioned_capacity = 0.0 # Ensure LVM_SYSTEM_DIR has been added to LVM.LVM_CMD_PREFIX # before the first LVM command is executed, and use the directory # where the specified lvm_conf file is located as the value. # NOTE(jdg): We use the temp var here becuase LVM_CMD_PREFIX is a # class global and if you use append here, you'll literally just keep # appending values to the global. _lvm_cmd_prefix = ['env', 'LC_ALL=C'] if lvm_conf and os.path.isfile(lvm_conf): lvm_sys_dir = os.path.dirname(lvm_conf) _lvm_cmd_prefix.append('LVM_SYSTEM_DIR=' + lvm_sys_dir) if suppress_fd_warn: _lvm_cmd_prefix.append('LVM_SUPPRESS_FD_WARNINGS=1') LVM.LVM_CMD_PREFIX = _lvm_cmd_prefix if create_vg and physical_volumes is not None: try: self._create_vg(physical_volumes) except putils.ProcessExecutionError as err: LOG.exception('Error creating Volume Group') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name) if self._vg_exists() is False: LOG.error('Unable to locate Volume Group %s', vg_name) raise exception.VolumeGroupNotFound(vg_name=vg_name) # NOTE: we assume that the VG has been activated outside of Cinder if lvm_type == 'thin': pool_name = "%s-pool" % self.vg_name if self.get_volume(pool_name) is None: try: self.create_thin_pool(pool_name) except putils.ProcessExecutionError: # Maybe we just lost the race against another copy of # this driver being in init in parallel - e.g. # cinder-volume and cinder-backup starting in parallel if self.get_volume(pool_name) is None: raise self.vg_thin_pool = pool_name self.activate_lv(self.vg_thin_pool) self.pv_list = self.get_all_physical_volumes(root_helper, vg_name) def _vg_exists(self) -> bool: """Simple check to see if VG exists. 
:returns: True if vg specified in object exists, else False """ exists = False cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings', '-o', 'name', self.vg_name] (out, _err) = self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) if out is not None: volume_groups = out.split() if self.vg_name in volume_groups: exists = True return exists def _create_vg(self, pv_list: list[str]) -> None: cmd = ['vgcreate', self.vg_name, ','.join(pv_list)] self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) @utils.retry(retry=utils.retry_if_exit_code, retry_param=139, interval=0.5, backoff_rate=0.5) def _run_lvm_command(self, cmd_arg_list: list[str], root_helper: Optional[str] = None, run_as_root: bool = True) -> tuple[str, str]: """Run LVM commands with a retry on code 139 to work around LVM bugs. Refer to LP bug 1901783, LP bug 1932188. """ if not root_helper: root_helper = self._root_helper (out, err) = self._execute(*cmd_arg_list, root_helper=root_helper, run_as_root=run_as_root) return (out, err) def _get_vg_uuid(self) -> list: cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings', '-o', 'uuid', self.vg_name] (out, _err) = self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) if out is not None: return out.split() else: return [] def _get_thin_pool_free_space(self, vg_name: str, thin_pool_name: str) -> float: """Returns available thin pool free space. :param vg_name: the vg where the pool is placed :param thin_pool_name: the thin pool to gather info for :returns: Free space in GB (float), calculated using data_percent """ cmd = LVM.LVM_CMD_PREFIX + ['lvs', '--noheadings', '--unit=g', '-o', 'size,data_percent', '--separator', ':', '--nosuffix'] # NOTE(gfidente): data_percent only applies to some types of LV so we # make sure to append the actual thin pool name cmd.append("/dev/%s/%s" % (vg_name, thin_pool_name)) free_space = 0.0 try: (out, err) = self._run_lvm_command(cmd) if out is not None: out = out.strip() data = out.split(':') pool_size = float(data[0]) data_percent = float(data[1]) consumed_space = pool_size / 100 * data_percent free_space = pool_size - consumed_space free_space = round(free_space, 2) # Need noqa due to a false error about the 'err' variable being unused # even though it is used in the logging. Possibly related to # https://github.com/PyCQA/pyflakes/issues/378. except putils.ProcessExecutionError as err: # noqa LOG.exception('Error querying thin pool about data_percent') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) return free_space @staticmethod def get_lvm_version(root_helper: str) -> tuple: """Static method to get LVM version from system. 
:param root_helper: root_helper to use for execute :returns: version 3-tuple """ cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--version'] (out, _err) = priv_rootwrap.execute(*cmd, root_helper=root_helper, run_as_root=True) lines = out.split('\n') for line in lines: if 'LVM version' in line: version_list = line.split() # NOTE(gfidente): version is formatted as follows: # major.minor.patchlevel(library API version)[-customisation] version = version_list[2] version_filter = r"(\d+)\.(\d+)\.(\d+).*" r = re.search(version_filter, version) if r is None: raise exception.BrickException( message='Cannot parse LVM version') version_tuple = tuple(map(int, r.group(1, 2, 3))) return version_tuple raise exception.BrickException(message='Cannot parse LVM version') @staticmethod @utils.retry(retry=utils.retry_if_exit_code, retry_param=139, interval=0.5, backoff_rate=0.5) # Bug#1901783 def get_lv_info(root_helper: str, vg_name: Optional[str] = None, lv_name: Optional[str] = None) -> list[dict[str, Any]]: """Retrieve info about LVs (all, in a VG, or a single LV). :param root_helper: root_helper to use for execute :param vg_name: optional, gathers info for only the specified VG :param lv_name: optional, gathers info for only the specified LV :returns: List of Dictionaries with LV info """ cmd = LVM.LVM_CMD_PREFIX + ['lvs', '--noheadings', '--unit=g', '-o', 'vg_name,name,size', '--nosuffix'] if lv_name is not None and vg_name is not None: cmd.append("%s/%s" % (vg_name, lv_name)) elif vg_name is not None: cmd.append(vg_name) try: (out, _err) = priv_rootwrap.execute(*cmd, root_helper=root_helper, run_as_root=True) except putils.ProcessExecutionError as err: with excutils.save_and_reraise_exception(reraise=True) as ctx: if "not found" in err.stderr or "Failed to find" in err.stderr: ctx.reraise = False LOG.info("Logical Volume not found when querying " "LVM info. (vg_name=%(vg)s, lv_name=%(lv)s", {'vg': vg_name, 'lv': lv_name}) out = None lv_list = [] if out is not None: volumes = out.split() iterator = zip(*[iter(volumes)] * 3) # pylint: disable=E1101 for vg, name, size in iterator: lv_list.append({"vg": vg, "name": name, "size": size}) return lv_list def get_volumes(self, lv_name: Optional[str] = None) -> list[dict]: """Get all LV's associated with this instantiation (VG). :returns: List of Dictionaries with LV info """ return self.get_lv_info(self._root_helper, self.vg_name, lv_name) def get_volume(self, name: str) -> Optional[dict]: """Get reference object of volume specified by name. :returns: dict representation of Logical Volume if exists """ ref_list = self.get_volumes(name) for r in ref_list: if r['name'] == name: return r return None @staticmethod def get_all_physical_volumes( root_helper: str, vg_name: Optional[str] = None) -> list[dict[str, Any]]: """Static method to get all PVs on a system. 
:param root_helper: root_helper to use for execute :param vg_name: optional, gathers info for only the specified VG :returns: List of Dictionaries with PV info """ field_sep = '|' cmd = LVM.LVM_CMD_PREFIX + ['pvs', '--noheadings', '--unit=g', '-o', 'vg_name,name,size,free', '--separator', field_sep, '--nosuffix'] (out, _err) = priv_rootwrap.execute(*cmd, root_helper=root_helper, run_as_root=True) pvs = out.split() if vg_name is not None: pvs = [pv for pv in pvs if vg_name == pv.split(field_sep)[0]] pv_list = [] for pv in pvs: fields = pv.split(field_sep) pv_list.append({'vg': fields[0], 'name': fields[1], 'size': float(fields[2]), 'available': float(fields[3])}) return pv_list def get_physical_volumes(self) -> list[dict[str, Any]]: """Get all PVs associated with this instantiation (VG). :returns: List of Dictionaries with PV info """ self.pv_list = self.get_all_physical_volumes(self._root_helper, self.vg_name) return self.pv_list @staticmethod def get_all_volume_groups( root_helper: str, vg_name: Optional[str] = None) -> list[dict[str, Any]]: """Static method to get all VGs on a system. :param root_helper: root_helper to use for execute :param vg_name: optional, gathers info for only the specified VG :returns: List of Dictionaries with VG info """ cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings', '--unit=g', '-o', 'name,size,free,lv_count,uuid', '--separator', ':', '--nosuffix'] if vg_name is not None: cmd.append(vg_name) (out, _err) = priv_rootwrap.execute(*cmd, root_helper=root_helper, run_as_root=True) vg_list = [] if out is not None: vgs = out.split() for vg in vgs: fields = vg.split(':') vg_list.append({'name': fields[0], 'size': float(fields[1]), 'available': float(fields[2]), 'lv_count': int(fields[3]), 'uuid': fields[4]}) return vg_list def update_volume_group_info(self) -> None: """Update VG info for this instantiation. Used to update member fields of object and provide a dict of info for caller. :returns: Dictionaries of VG info """ vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name) if len(vg_list) != 1: LOG.error('Unable to find VG: %s', self.vg_name) raise exception.VolumeGroupNotFound(vg_name=self.vg_name) self.vg_size = float(vg_list[0]['size']) self.vg_free_space = float(vg_list[0]['available']) self.vg_lv_count = int(vg_list[0]['lv_count']) self.vg_uuid = vg_list[0]['uuid'] total_vols_size = 0.0 if self.vg_thin_pool is not None: # NOTE(xyang): If providing only self.vg_name, # get_lv_info will output info on the thin pool and all # individual volumes. # get_lv_info(self._root_helper, 'stack-vg') # sudo lvs --noheadings --unit=g -o vg_name,name,size # --nosuffix stack-vg # stack-vg stack-pool 9.51 # stack-vg volume-13380d16-54c3-4979-9d22-172082dbc1a1 1.00 # stack-vg volume-629e13ab-7759-46a5-b155-ee1eb20ca892 1.00 # stack-vg volume-e3e6281c-51ee-464c-b1a7-db6c0854622c 1.00 # # If providing both self.vg_name and self.vg_thin_pool, # get_lv_info will output only info on the thin pool, but not # individual volumes. # get_lv_info(self._root_helper, 'stack-vg', 'stack-pool') # sudo lvs --noheadings --unit=g -o vg_name,name,size # --nosuffix stack-vg/stack-pool # stack-vg stack-pool 9.51 # # We need info on both the thin pool and the volumes, # therefore we should provide only self.vg_name, but not # self.vg_thin_pool here. for lv in self.get_lv_info(self._root_helper, self.vg_name): lvsize = lv['size'] # get_lv_info runs "lvs" command with "--nosuffix". # This removes "g" from "1.00g" and only outputs "1.00". 
# Running "lvs" command without "--nosuffix" will output # "1.00g" if "g" is the unit. # Remove the unit if it is in lv['size']. if not lv['size'][-1].isdigit(): lvsize = lvsize[:-1] if lv['name'] == self.vg_thin_pool: self.vg_thin_pool_size = float(lvsize) tpfs = self._get_thin_pool_free_space(self.vg_name, self.vg_thin_pool) self.vg_thin_pool_free_space = tpfs else: total_vols_size = total_vols_size + float(lvsize) total_vols_size = round(total_vols_size, 2) self.vg_provisioned_capacity = total_vols_size def _calculate_thin_pool_size(self) -> list[str]: """Calculates the correct size for a thin pool. Some amount of free space must remain in the volume group for metadata for the contained logical volumes. The exact amount depends on how much volume sharing you expect. :returns: An lvcreate-ready string for the number of calculated bytes. """ # make sure volume group information is current self.update_volume_group_info() return ["-l", "100%FREE"] def create_thin_pool(self, name: Optional[str] = None) -> None: """Creates a thin provisioning pool for this VG. The syntax here is slightly different than the default lvcreate -T, so we'll just write a custom cmd here and do it. :param name: Name to use for pool, default is "-pool" :returns: The size string passed to the lvcreate command """ if name is None: name = '%s-pool' % self.vg_name vg_pool_name = '%s/%s' % (self.vg_name, name) size_args = self._calculate_thin_pool_size() cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T'] cmd.extend(size_args) cmd.append(vg_pool_name) LOG.debug("Creating thin pool '%(pool)s' with size %(size)s of " "total %(free)sg", {'pool': vg_pool_name, 'size': size_args, 'free': self.vg_free_space}) self._run_lvm_command(cmd) self.vg_thin_pool = name return def create_volume(self, name: str, size_str: str, lv_type: str = 'default', mirror_count: int = 0) -> None: """Creates a logical volume on the object's VG. :param name: Name to use when creating Logical Volume :param size_str: Size to use when creating Logical Volume :param lv_type: Type of Volume (default or thin) :param mirror_count: Use LVM mirroring with specified count """ if lv_type == 'thin': pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool) cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-V', size_str, '-n', name, pool_path] else: cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-n', name, self.vg_name, '-L', size_str] if mirror_count > 0: cmd.extend(['-m', str(mirror_count), '--nosync', '--mirrorlog', 'mirrored']) terras = int(size_str[:-1]) / 1024.0 if terras >= 1.5: rsize = int(2 ** math.ceil(math.log(terras) / math.log(2))) # NOTE(vish): Next power of two for region size. See: # http://red.ht/U2BPOD cmd.extend(['-R', str(rsize)]) try: self._run_lvm_command(cmd) except putils.ProcessExecutionError as err: LOG.exception('Error creating Volume') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) raise @utils.retry(putils.ProcessExecutionError) def create_lv_snapshot(self, name: str, source_lv_name: str, lv_type: str = 'default') -> None: """Creates a snapshot of a logical volume. 
:param name: Name to assign to new snapshot :param source_lv_name: Name of Logical Volume to snapshot :param lv_type: Type of LV (default or thin) """ source_lvref = self.get_volume(source_lv_name) if source_lvref is None: LOG.error("Trying to create snapshot by non-existent LV: %s", source_lv_name) raise exception.VolumeDeviceNotFound(device=source_lv_name) cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '--name', name, '-k', 'y', '--snapshot', '%s/%s' % (self.vg_name, source_lv_name)] if lv_type != 'thin': size = source_lvref['size'] cmd.extend(['-L', '%sg' % (size)]) try: self._run_lvm_command(cmd) except putils.ProcessExecutionError as err: LOG.exception('Error creating snapshot') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) raise def _mangle_lv_name(self, name: str) -> str: # Linux LVM reserves names that start with 'snapshot', so such # volume names can't be created. Mangle them. if not name.startswith('snapshot'): return name return '_' + name def _lv_is_active(self, name: str) -> bool: cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', 'Attr', '%s/%s' % (self.vg_name, name)] out, _err = self._run_lvm_command(cmd) if out: out = out.strip() # An example output might be '-wi-a----'; the 4th index specifies # the status of the volume. 'a' for active, '-' for inactive. if (out[4] == 'a'): return True return False def deactivate_lv(self, name: str) -> None: lv_path = self.vg_name + '/' + self._mangle_lv_name(name) cmd = ['lvchange', '-a', 'n'] cmd.append(lv_path) try: self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception('Error deactivating LV') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) raise # Wait until lv is deactivated to return in # order to prevent a race condition. self._wait_for_volume_deactivation(name) @utils.retry(retry_param=exception.VolumeNotDeactivated, retries=5, backoff_rate=2) def _wait_for_volume_deactivation(self, name: str) -> None: LOG.debug("Checking to see if volume %s has been deactivated.", name) if self._lv_is_active(name): LOG.debug("Volume %s is still active.", name) raise exception.VolumeNotDeactivated(name=name) else: LOG.debug("Volume %s has been deactivated.", name) def activate_lv(self, name: str, is_snapshot: bool = False, permanent: bool = False) -> None: """Ensure that logical volume/snapshot logical volume is activated. :param name: Name of LV to activate :param is_snapshot: whether LV is a snapshot :param permanent: whether we should drop skipactivation flag :raises: putils.ProcessExecutionError """ lv_path = self.vg_name + '/' + self._mangle_lv_name(name) # Must pass --yes to activate both the snap LV and its origin LV. # Otherwise lvchange asks if you would like to do this interactively, # and fails. cmd = ['lvchange', '-a', 'y', '--yes'] cmd.append('-K') # If permanent=True is specified, drop the skipactivation flag in # order to make this LV automatically activated after next reboot. if permanent: cmd += ['-k', 'n'] cmd.append(lv_path) try: self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception('Error activating LV') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) raise @utils.retry(putils.ProcessExecutionError) def delete(self, name: str) -> None: """Delete logical volume or snapshot.
:param name: Name of LV to delete """ def run_udevadm_settle(): self._execute('udevadm', 'settle', root_helper=self._root_helper, run_as_root=True, check_exit_code=False) # LV removal seems to be a race with other writers or udev in # some cases (see LP #1270192), so we enable retry deactivation LVM_CONFIG = 'activation { retry_deactivation = 1} ' try: self._execute( 'lvremove', '--config', LVM_CONFIG, '-f', '%s/%s' % (self.vg_name, name), root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.debug('Error reported running lvremove: CMD: %(command)s, ' 'RESPONSE: %(response)s', {'command': err.cmd, 'response': err.stderr}) LOG.debug('Attempting udev settle and retry of lvremove...') run_udevadm_settle() # The previous failing lvremove -f might leave behind # suspended devices; when lvmetad is not available, any # further lvm command will block forever. # Therefore we need to skip suspended devices on retry. LVM_CONFIG += 'devices { ignore_suspended_devices = 1}' self._execute( 'lvremove', '--config', LVM_CONFIG, '-f', '%s/%s' % (self.vg_name, name), root_helper=self._root_helper, run_as_root=True) LOG.debug('Successfully deleted volume: %s after ' 'udev settle.', name) def revert(self, snapshot_name: str) -> None: """Revert an LV from snapshot. :param snapshot_name: Name of snapshot to revert """ self._execute('lvconvert', '--merge', snapshot_name, root_helper=self._root_helper, run_as_root=True) def lv_has_snapshot(self, name: str) -> bool: cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', 'Attr', '--readonly', '%s/%s' % (self.vg_name, name)] out, _err = self._run_lvm_command(cmd) if out: out = out.strip() if (out[0] == 'o') or (out[0] == 'O'): return True return False def extend_volume(self, lv_name: str, new_size) -> None: """Extend the size of an existing volume.""" # Volumes with snaps have attributes 'o' or 'O' and will be # deactivated, but Thin Volumes with snaps have attribute 'V' # and won't be deactivated because the lv_has_snapshot method looks # for 'o' or 'O' if self.lv_has_snapshot(lv_name): self.deactivate_lv(lv_name) try: cmd = LVM.LVM_CMD_PREFIX + ['lvextend', '-L', new_size, '%s/%s' % (self.vg_name, lv_name)] self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception('Error extending Volume') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) raise def vg_mirror_free_space(self, mirror_count: int) -> float: free_capacity = 0.0 disks = [] for pv in self.pv_list: disks.append(float(pv['available'])) while True: disks = sorted([a for a in disks if a > 0.0], reverse=True) if len(disks) <= mirror_count: break # consume the smallest disk disk = disks[-1] disks = disks[:-1] # match extents for each mirror on the largest disks for index in list(range(mirror_count)): disks[index] -= disk free_capacity += disk return free_capacity def vg_mirror_size(self, mirror_count: int) -> float: return (self.vg_free_space / (mirror_count + 1)) def rename_volume(self, lv_name: str, new_name: str) -> None: """Change the name of an existing volume.""" try: self._execute('lvrename', self.vg_name, lv_name, new_name, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception('Error renaming logical volume') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) raise ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/opts.py0000664000175000017500000000534000000000000016551 0ustar00zuulzuul00000000000000# Copyright (c) 2022, Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg _opts = [ cfg.StrOpt('lock_path', default=None, # Set by set_defaults method below on setup help='Directory to use for os-brick lock files. Defaults to ' 'oslo_concurrency.lock_path which is a sensible default ' 'for compute nodes, but not for HCI deployments or ' 'controllers where Glance uses Cinder as a backend, as ' 'locks should use the same directory.'), cfg.IntOpt('wait_mpath_device_attempts', default=4, min=1, help='Number of attempts for the multipath device to be ready ' 'for I/O after it was created. Readiness is checked with ' '``multipath -C``. See related ' '``wait_mpath_device_interval`` config option. Default ' 'value is 4.'), cfg.IntOpt('wait_mpath_device_interval', default=1, min=1, help='Interval value to wait for multipath device to be ready ' 'for I/O. Max number of attempts is set in ' '``wait_mpath_device_attempts``. Time in seconds to wait ' 'for each retry is ``base ^ attempt * interval``, so for ' '4 attempts (1 attempt 3 retries) and 1 second interval ' 'will yield: 2, 4 and 8 seconds. Note that there is no ' 'wait before first attempt. Default value is 1.'), ] cfg.CONF.register_opts(_opts, group='os_brick') def list_opts(): """oslo.config.opts entrypoint for sample config generation.""" return [('os_brick', _opts)] def set_defaults(conf=cfg.CONF): """Set default values that depend on other libraries. Service configuration options must have been initialized before this call because oslo's lock_path doesn't have a value before that. Called from both os_brick setup and from the oslo.config.opts entrypoint for sample config generation. """ conf.set_default('lock_path', conf.oslo_concurrency.lock_path, 'os_brick') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.615759 os_brick-6.11.0/os_brick/privileged/0000775000175000017500000000000000000000000017342 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/privileged/__init__.py0000664000175000017500000000235600000000000021461 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
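# NOTE: illustrative example only, not part of os-brick. The privsep context
# defined below is what the privileged helpers in this package hang off of;
# a function becomes privileged by decorating it with the context's
# entrypoint, e.g. (hypothetical helper name):
#
#     import os_brick.privileged
#
#     @os_brick.privileged.default.entrypoint
#     def read_protected_file(path):
#         # Runs inside the privsep daemon with the capabilities below.
#         with open(path) as f:
#             return f.read()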
import os from oslo_privsep import capabilities as c from oslo_privsep import priv_context capabilities = [c.CAP_SYS_ADMIN] # On virtual environments libraries are not owned by the Daemon user (root), so # the Daemon needs the capability to bypass file read permission checks in # order to dynamically load the code to run. if os.environ.get('VIRTUAL_ENV'): capabilities.append(c.CAP_DAC_READ_SEARCH) # It is expected that most (if not all) os-brick operations can be # executed with these privileges. default = priv_context.PrivContext( __name__, cfg_section='privsep_osbrick', pypath=__name__ + '.default', capabilities=capabilities, logger_name=__name__, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/privileged/lightos.py0000664000175000017500000000166300000000000021373 0ustar00zuulzuul00000000000000# Copyright (C) 2016-2022 Lightbits Labs Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import shutil from oslo_utils import fileutils import os_brick.privileged @os_brick.privileged.default.entrypoint def delete_dsc_file(file_name): return fileutils.delete_if_exists(file_name) @os_brick.privileged.default.entrypoint def move_dsc_file(src, dst): return shutil.move(src, dst) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/privileged/nvmeof.py0000664000175000017500000001141600000000000021211 0ustar00zuulzuul00000000000000# Copyright (c) 2021, Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import annotations import errno import os from typing import Optional from oslo_concurrency import processutils as putils from oslo_log import log as logging import os_brick.privileged from os_brick.privileged import rootwrap LOG = logging.getLogger(__name__) @os_brick.privileged.default.entrypoint def create_hostnqn(system_uuid: Optional[str] = None) -> str: """Create the hostnqn file to speed up finding out the nqn. By having the /etc/nvme/hostnqn not only do we make sure that that value is always used on this system, but we are also able to just open the file to get the nqn on each get_connector_properties call instead of having to make a call to nvme show-hostnqn command. In newer nvme-cli versions calling show-hostnqn will not only try to locate the file (which we know doesn't exist or this method wouldn't have been called), but it will also generate one. 
In older nvme-cli versions that is not the case. """ host_nqn = '' try: os.makedirs('/etc/nvme', mode=0o755, exist_ok=True) # If we have the system's unique uuid we can just write the file if system_uuid: host_nqn = 'nqn.2014-08.org.nvmexpress:uuid:' + system_uuid else: # Try to get existing nqn generated from dmi or systemd try: host_nqn, err = rootwrap.custom_execute('nvme', 'show-hostnqn') host_nqn = host_nqn.strip() # This is different from OSError's ENOENT, which is missing nvme # command. This ENOENT is when nvme says there isn't an nqn. except putils.ProcessExecutionError as e: # nvme-cli's errors are all over the place, so merge the output err_msg = e.stdout + '\n' + e.stderr msg = err_msg.casefold() if 'error: invalid sub-command' in msg: LOG.debug('Version too old cannot check current hostnqn.') elif 'hostnqn is not available' in msg: LOG.debug('Version too old to return hostnqn from non-' 'file sources') elif e.exit_code == errno.ENOENT: LOG.debug('No nqn could be formed from dmi or systemd.') else: LOG.debug('Unknown error from nvme show-hostnqn: %s', err_msg) raise if not host_nqn: LOG.debug('Generating nqn') host_nqn, err = rootwrap.custom_execute('nvme', 'gen-hostnqn') host_nqn = host_nqn.strip() with open('/etc/nvme/hostnqn', 'w') as f: LOG.debug('Writing hostnqn file') f.write(host_nqn) os.chmod('/etc/nvme/hostnqn', 0o644) except Exception as e: LOG.warning("Could not generate host nqn: %s", e) return host_nqn @os_brick.privileged.default.entrypoint def get_system_uuid() -> str: # RSD requires system_uuid to let Cinder RSD Driver identify # Nova node for later RSD volume attachment. try: with open('/sys/class/dmi/id/product_uuid', 'r') as f: return f.read().strip() except Exception: LOG.debug("Could not read dmi's 'product_uuid' on sysfs") try: out, err = rootwrap.custom_execute('dmidecode', '-ssystem-uuid') if not out: LOG.warning('dmidecode returned empty system-uuid') except (putils.ProcessExecutionError, FileNotFoundError) as e: LOG.debug("Unable to locate dmidecode. For Cinder RSD Backend," " please make sure it is installed: %s", e) out = "" return out.strip() @os_brick.privileged.default.entrypoint def create_hostid(uuid: str) -> Optional[str]: """Create the hostid to ensure it's always the same.""" try: os.makedirs('/etc/nvme', mode=0o755, exist_ok=True) with open('/etc/nvme/hostid', 'w') as f: LOG.debug('Writing nvme hostid %s', uuid) f.write(f'{uuid}\n') os.chmod('/etc/nvme/hostid', 0o644) except Exception as e: LOG.warning("Could not generate nvme host id: %s", e) return None return uuid ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/privileged/rbd.py0000664000175000017500000000401300000000000020461 0ustar00zuulzuul00000000000000# Copyright (c) 2020, Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
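# NOTE: illustrative example only, not part of os-brick. The module below
# resolves RBDConnector lazily (on first call) to avoid a circular import
# between this module and the connector that uses it. The same pattern in
# isolation, with hypothetical names:
#
#     from oslo_utils import importutils
#
#     _cached_cls = None
#
#     def _load_connector_class():
#         global _cached_cls
#         if _cached_cls is None:
#             _cached_cls = importutils.import_class(
#                 'os_brick.initiator.connectors.rbd.RBDConnector')
#         return _cached_cls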
from typing import Optional from oslo_utils import fileutils from oslo_utils import importutils import os_brick.privileged # Lazy load the rbd module to avoid circular references RBDConnector: Optional['os_brick.initiator.connectors.rbd.RBDConnector'] = None def _get_rbd_class(): global RBDConnector global get_rbd_class # Lazy load the class if not RBDConnector: rbd_class_route = 'os_brick.initiator.connectors.rbd.RBDConnector' RBDConnector = importutils.import_class(rbd_class_route) # Job is done, following calls don't need to do anything get_rbd_class = lambda: None # noqa get_rbd_class = _get_rbd_class @os_brick.privileged.default.entrypoint def delete_if_exists(path): return fileutils.delete_if_exists(path) @os_brick.privileged.default.entrypoint def root_create_ceph_conf(monitor_ips, monitor_ports, cluster_name, user, keyring): """Create a .conf file for Ceph cluster only accessible by root.""" get_rbd_class() assert RBDConnector is not None return RBDConnector._create_ceph_conf(monitor_ips, monitor_ports, cluster_name, user, keyring) @os_brick.privileged.default.entrypoint def check_valid_path(path): get_rbd_class() assert RBDConnector is not None with open(path, 'rb') as rbd_handle: return RBDConnector._check_valid_device(rbd_handle) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/privileged/rootwrap.py0000664000175000017500000002250200000000000021572 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Just in case it wasn't clear, this is a massive security back-door. `execute_root()` (or the same via `execute(run_as_root=True)`) allows any command to be run as the privileged user (default "root"). This is intended only as an expedient transition and should be removed ASAP. This is not completely unreasonable because: 1. We have no tool/workflow for merging changes to rootwrap filter configs from os-brick into nova/cinder, which makes it difficult to evolve these loosely coupled projects. 2. Let's not pretend the earlier situation was any better. The rootwrap filters config contained several entries like "allow cp as root with any arguments", etc, and would have posed only a mild inconvenience to an attacker. At least with privsep we can (in principle) run the "root" commands as a non-root uid, with restricted Linux capabilities. The plan is to switch os-brick to privsep using this module (removing the urgency of (1)), then work on the larger refactor that addresses (2) in followup changes. """ import os import signal import threading import time from typing import Any, Iterable from oslo_concurrency import processutils as putils from oslo_log import log as logging from oslo_utils import strutils from os_brick import exception from os_brick import privileged LOG = logging.getLogger(__name__) def custom_execute(*cmd, **kwargs): """Custom execute with additional functionality on top of Oslo's. Additional features are timeouts and exponential backoff retries. 
The exponential backoff retries replace standard Oslo random sleep times that range from 200ms to 2 seconds when attempts is greater than 1, but it is disabled if delay_on_retry is passed as a parameter. Exponential backoff is controlled via interval and backoff_rate parameters, just like the os_brick.utils.retry decorator. To use the timeout mechanism to stop the subprocess with a specific signal after a number of seconds we must pass a non-zero timeout value in the call. When using multiple attempts and timeout at the same time the method will only raise the timeout exception to the caller if the last try times out. Timeout mechanism is controlled with timeout, signal, and raise_timeout parameters. :param interval: The multiplier :param backoff_rate: Base used for the exponential backoff :param timeout: Timeout defined in seconds :param signal: Signal to use to stop the process on timeout :param raise_timeout: Raise an exception on timeout or return error as stderr. Defaults to raising if check_exit_code is not False. :returns: Tuple with stdout and stderr """ # Since python 2 doesn't have nonlocal we use a mutable variable to store # the previous attempt number, the timeout handler, and the process that # timed out shared_data = [0, None, None] def on_timeout(proc): sanitized_cmd = strutils.mask_password(' '.join(cmd)) LOG.warning('Stopping %(cmd)s with signal %(signal)s after %(time)ss.', {'signal': sig_end, 'cmd': sanitized_cmd, 'time': timeout}) shared_data[2] = proc proc.send_signal(sig_end) def on_execute(proc): # Call user's on_execute method if on_execute_call: on_execute_call(proc) # Sleep if this is not the first try and we have a timeout interval if shared_data[0] and interval: exp = backoff_rate ** shared_data[0] wait_for = max(0, interval * exp) LOG.debug('Sleeping for %s seconds', wait_for) time.sleep(wait_for) # Increase the number of tries and start the timeout timer shared_data[0] += 1 if timeout: shared_data[2] = None shared_data[1] = threading.Timer(timeout, on_timeout, (proc,)) shared_data[1].start() def on_completion(proc): # This is always called regardless of success or failure # Cancel the timeout timer if shared_data[1]: shared_data[1].cancel() # Call user's on_completion method if on_completion_call: on_completion_call(proc) # We will be doing the wait ourselves in on_execute if 'delay_on_retry' in kwargs: interval = None else: kwargs['delay_on_retry'] = False interval = kwargs.pop('interval', 1) backoff_rate = kwargs.pop('backoff_rate', 2) # Operations performed by OS-Brick should be relatively quick. The longest # default timeout is probably the iSCSI ones, with 120 seconds. Since CLI # tools may get stuck we don't want to leave the timeout as infinite, so we # set a more than reasonable timeout of 10 minutes (600 seconds).
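# For reference (illustrative only): with the defaults below (interval=1,
# backoff_rate=2), on_execute() sleeps interval * backoff_rate ** attempts
# before each retry, i.e. for three retries:
#
#     >>> [1 * 2 ** attempt for attempt in (1, 2, 3)]
#     [2, 4, 8]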
    timeout = kwargs.pop('timeout', 600)
    sig_end = kwargs.pop('signal', signal.SIGTERM)
    default_raise_timeout = kwargs.get('check_exit_code', True)
    raise_timeout = kwargs.pop('raise_timeout', default_raise_timeout)

    on_execute_call = kwargs.pop('on_execute', None)
    on_completion_call = kwargs.pop('on_completion', None)

    try:
        return putils.execute(on_execute=on_execute,
                              on_completion=on_completion, *cmd, **kwargs)
    except putils.ProcessExecutionError:
        # proc is only stored if a timeout happened
        proc = shared_data[2]
        if proc:
            sanitized_cmd = strutils.mask_password(' '.join(cmd))
            msg = ('Time out on proc %(pid)s after waiting %(time)s seconds '
                   'when running %(cmd)s' %
                   {'pid': proc.pid, 'time': timeout, 'cmd': sanitized_cmd})
            LOG.debug(msg)
            if raise_timeout:
                raise exception.ExecutionTimeout(stdout='', stderr=msg,
                                                 cmd=sanitized_cmd)
            return '', msg
        raise


# Entrypoint used for rootwrap.py transition code.  Don't use this for
# other purposes, since it will be removed when we think the
# transition is finished.
def execute(*cmd, **kwargs):
    """NB: Raises processutils.ProcessExecutionError on failure."""
    run_as_root = kwargs.pop('run_as_root', False)
    kwargs.pop('root_helper', None)
    try:
        if run_as_root:
            return execute_root(*cmd, **kwargs)
        else:
            return custom_execute(*cmd, **kwargs)
    except OSError as e:
        # Note:
        #  putils.execute('bogus', run_as_root=True)
        #   raises ProcessExecutionError(exit_code=1) (because there's a
        #   "sh -c bogus" involved in there somewhere), but:
        #  putils.execute('bogus', run_as_root=False)
        #   raises OSError(not found).
        #
        # Lots of code in os-brick catches only ProcessExecutionError
        # and never encountered the latter when using rootwrap.
        # Rather than fix all the callers, we just always raise
        # ProcessExecutionError here :(
        sanitized_cmd = strutils.mask_password(' '.join(cmd))
        raise putils.ProcessExecutionError(
            cmd=sanitized_cmd, description=str(e))


# See comment on `execute`
@privileged.default.entrypoint
def execute_root(*cmd, **kwargs):
    """NB: Raises processutils.ProcessExecutionError/OSError on failure."""
    return custom_execute(*cmd, shell=False, run_as_root=False, **kwargs)


@privileged.default.entrypoint
def unlink_root(*links: Iterable[str], **kwargs: dict[str, Any]) -> None:
    """Unlink system links with sys admin privileges.

    By default it will raise an exception if a link does not exist, and stop
    unlinking the remaining links.

    This behavior can be modified by passing the optional parameters
    `no_errors` and `raise_at_end`.

    :param no_errors: Don't raise an exception on error
    :param raise_at_end: Don't raise an exception on the first error, try to
                         unlink all links, and then raise an ExceptionChainer
                         with all the errors that were found.
    """
    no_errors = kwargs.get('no_errors', False)
    raise_at_end = kwargs.get('raise_at_end', False)
    exc = exception.ExceptionChainer()
    catch_exception = no_errors or raise_at_end
    LOG.debug('Unlinking %s', links)
    for link in links:
        with exc.context(catch_exception, 'Unlink failed for %s', link):
            os.unlink(link)
    if not no_errors and raise_at_end and exc:
        raise exc


@privileged.default.entrypoint
def link_root(target, link_name, force=True):
    """Create a symbolic link with sys admin privileges.

    This method behaves like the "ln -s" command, including the force
    parameter, where it will replace the link_name file even if it's not a
    symlink.
    """
    LOG.debug('Linking %s -> %s', link_name, target)
    if force:
        try:
            os.remove(link_name)
        except FileNotFoundError:
            pass
    os.symlink(target, link_name)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/privileged/scaleio.py0000664000175000017500000000560400000000000021340 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from binascii import hexlify
import configparser
from contextlib import contextmanager
from fcntl import ioctl
import os
import struct
import uuid

from os_brick import exception
from os_brick import privileged


SCINI_DEVICE_PATH = '/dev/scini'


@contextmanager
def open_scini_device():
    """Open the scini device for low-level I/O using a contextmanager.

    The file descriptor is closed after all operations are performed, if it
    was opened successfully.

    :return: scini device file descriptor
    :rtype: int
    """
    fd = None
    try:
        fd = os.open(SCINI_DEVICE_PATH, os.O_RDWR)
        yield fd
    finally:
        # os.open can legitimately return 0, so check against None instead
        # of relying on truthiness
        if fd is not None:
            os.close(fd)


@privileged.default.entrypoint
def get_guid(op_code):
    """Query the ScaleIO sdc GUID via an ioctl request.

    :param op_code: operation code
    :type op_code: int
    :return: ScaleIO sdc GUID
    :rtype: str
    """
    with open_scini_device() as fd:
        out = ioctl(fd, op_code, struct.pack('QQQ', 0, 0, 0))
        # The first 8 bytes contain a return code that is not used
        # so they can be discarded.
        out_to_hex = hexlify(out[8:]).decode()
        return str(uuid.UUID(out_to_hex))


@privileged.default.entrypoint
def rescan_vols(op_code):
    """Rescan ScaleIO volumes via an ioctl request.

    :param op_code: operation code
    :type op_code: int
    """
    with open_scini_device() as fd:
        ioctl(fd, op_code, struct.pack('Q', 0))


@privileged.default.entrypoint
def get_connector_password(filename, config_group, failed_over):
    """Read the ScaleIO connector configuration file and get the password.

    :param filename: path to the connector configuration file
    :type filename: str
    :param config_group: name of the section in the configuration file
    :type config_group: str
    :param failed_over: flag representing if the storage is in a failed-over
                        state
    :type failed_over: bool
    :return: connector password
    :rtype: str
    """
    if not os.path.isfile(filename):
        msg = (
            "ScaleIO connector configuration file "
            "is not found in path %s."
% filename ) raise exception.BrickException(message=msg) conf = configparser.ConfigParser() conf.read(filename) password_key = ( "replicating_san_password" if failed_over else "san_password" ) return conf[config_group][password_key] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.615759 os_brick-6.11.0/os_brick/remotefs/0000775000175000017500000000000000000000000017034 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/remotefs/__init__.py0000664000175000017500000000000000000000000021133 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/remotefs/remotefs.py0000664000175000017500000002566700000000000021252 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Remote filesystem client utilities.""" import os import re import tempfile from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils.secretutils import md5 from os_brick import exception from os_brick import executor from os_brick.i18n import _ LOG = logging.getLogger(__name__) class RemoteFsClient(executor.Executor): def __init__(self, mount_type, root_helper, execute=None, *args, **kwargs): super(RemoteFsClient, self).__init__(root_helper, execute=execute, *args, **kwargs) # type: ignore mount_type_to_option_prefix = { 'nfs': 'nfs', 'cifs': 'smbfs', 'glusterfs': 'glusterfs', 'vzstorage': 'vzstorage', 'quobyte': 'quobyte', 'scality': 'scality' } if mount_type not in mount_type_to_option_prefix: raise exception.ProtocolNotSupported(protocol=mount_type) self._mount_type = mount_type option_prefix = mount_type_to_option_prefix[mount_type] self._mount_base: str self._mount_base = kwargs.get(option_prefix + '_mount_point_base') # type: ignore if not self._mount_base: raise exception.InvalidParameterValue( err=_('%s_mount_point_base required') % option_prefix) self._mount_options = kwargs.get(option_prefix + '_mount_options') if mount_type == "nfs": self._check_nfs_options() def get_mount_base(self): return self._mount_base def _get_hash_str(self, base_str): """Return a string that represents hash of base_str (hex format).""" if isinstance(base_str, str): base_str = base_str.encode('utf-8') return md5(base_str, usedforsecurity=False).hexdigest() def get_mount_point(self, device_name: str): """Get Mount Point. 
        :param device_name: example 172.18.194.100:/var/nfs
        """
        return os.path.join(self._mount_base,
                            self._get_hash_str(device_name))

    def _read_mounts(self):
        """Return a dict mapping mount points to the devices mounted on them.

        Format reference:
        http://man7.org/linux/man-pages/man5/fstab.5.html
        """
        with open("/proc/mounts", "r") as mounts:
            # Remove empty lines and split lines by whitespace
            lines = [line.split() for line in mounts.read().splitlines()
                     if line.strip()]
            # Return {mountpoint: mountdevice}, i.e. the 2nd and 1st fields
            # as per http://man7.org/linux/man-pages/man5/fstab.5.html
            return {line[1]: line[0] for line in lines if line[0] != '#'}

    def mount(self, share, flags=None):
        """Mount the given share."""
        mount_path = self.get_mount_point(share)
        if mount_path in self._read_mounts():
            LOG.debug('Already mounted: %s', mount_path)
            return

        self._execute('mkdir', '-p', mount_path, check_exit_code=0)
        if self._mount_type == 'nfs':
            self._mount_nfs(share, mount_path, flags)
        else:
            self._do_mount(self._mount_type, share, mount_path,
                           self._mount_options, flags)

    def _do_mount(self, mount_type, share, mount_path, mount_options=None,
                  flags=None):
        """Mount the share based on the specified params."""
        mnt_cmd = ['mount', '-t', mount_type]
        if mount_options is not None:
            mnt_cmd.extend(['-o', mount_options])
        if flags is not None:
            mnt_cmd.extend(flags)
        mnt_cmd.extend([share, mount_path])

        try:
            self._execute(*mnt_cmd, root_helper=self._root_helper,
                          run_as_root=True, check_exit_code=0)
        except processutils.ProcessExecutionError as exc:
            if 'already mounted' in exc.stderr:
                LOG.debug("Already mounted: %s", share)
                # The error message can say "busy or already mounted" when
                # the share didn't actually mount, so look for it.
                if share in self._read_mounts():
                    return
            LOG.error("Failed to mount %(share)s, reason: %(reason)s",
                      {'share': share, 'reason': exc.stderr})
            raise

    def _mount_nfs(self, nfs_share, mount_path, flags=None):
        """Mount the nfs share using the prepared mount type options."""
        mnt_errors = {}

        # This loop allows us to first try to mount with NFS 4.1 for pNFS
        # support, but fall back to NFS 4 or NFS 3 if either the client or
        # the server does not support it.
        for mnt_type in sorted(self._nfs_mount_type_opts.keys(),
                               reverse=True):
            options = self._nfs_mount_type_opts[mnt_type]
            try:
                self._do_mount('nfs', nfs_share, mount_path, options, flags)
                LOG.debug('Mounted %(sh)s using %(mnt_type)s.',
                          {'sh': nfs_share, 'mnt_type': mnt_type})
                return
            except Exception as e:
                mnt_errors[mnt_type] = str(e)
                LOG.debug('Failed to do %s mount.', mnt_type)
        raise exception.BrickException(_("NFS mount failed for share %(sh)s. "
                                         "Error - %(error)s")
                                       % {'sh': nfs_share,
                                          'error': mnt_errors})

    def _check_nfs_options(self):
        """Check and prepare the nfs mount type options."""
        self._nfs_mount_type_opts = {'nfs': self._mount_options}
        nfs_vers_opt_patterns = ['^nfsvers', '^vers', r'^v[\d]']
        for opt in nfs_vers_opt_patterns:
            if self._option_exists(self._mount_options, opt):
                return

        # pNFS requires NFS 4.1. The mount.nfs4 utility does not
        # automatically negotiate 4.1 support, we have to ask for it by
        # specifying two options: vers=4 and minorversion=1.
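        # Illustrative example (hypothetical option string): with
        # self._mount_options = 'rsize=65536,wsize=65536' the pnfs entry
        # built below becomes 'rsize=65536,wsize=65536,vers=4,minorversion=1'.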
        pnfs_opts = self._update_option(self._mount_options, 'vers', '4')
        pnfs_opts = self._update_option(pnfs_opts, 'minorversion', '1')
        self._nfs_mount_type_opts['pnfs'] = pnfs_opts

    def _option_exists(self, options, opt_pattern):
        """Check if the option exists in nfs options and return its position."""
        options = [x.strip() for x in options.split(',')] if options else []
        pos = 0
        for opt in options:
            pos = pos + 1
            if re.match(opt_pattern, opt, flags=0):
                return pos
        return 0

    def _update_option(self, options, option, value=None):
        """Update the option if it exists, else add it; return new options."""
        opts = [x.strip() for x in options.split(',')] if options else []
        pos = self._option_exists(options, option)
        if pos:
            opts.pop(pos - 1)
        opt = '%s=%s' % (option, value) if value else option
        opts.append(opt)
        return ",".join(opts) if len(opts) > 1 else opts[0]


class ScalityRemoteFsClient(RemoteFsClient):
    def __init__(self, mount_type, root_helper, execute=None,
                 *args, **kwargs):
        super(ScalityRemoteFsClient, self).__init__(
            mount_type, root_helper, execute=execute,
            *args, **kwargs)  # type: ignore
        self._mount_type = mount_type
        self._mount_base = kwargs.get(
            'scality_mount_point_base', "").rstrip('/')
        if not self._mount_base:
            raise exception.InvalidParameterValue(
                err=_('scality_mount_point_base required'))
        self._mount_options = None

    def get_mount_point(self, device_name):
        return os.path.join(self._mount_base, device_name, "00")

    def mount(self, share, flags=None):
        """Mount the Scality ScaleOut FS.

        The `share` argument is ignored because you can't mount several
        SOFS at the same time on a single server. But we want to keep the
        same method signature for class inheritance purposes.
        """
        if self._mount_base in self._read_mounts():
            LOG.debug('Already mounted: %s', self._mount_base)
            return
        self._execute('mkdir', '-p', self._mount_base, check_exit_code=0)
        super(ScalityRemoteFsClient, self)._do_mount(
            'sofs', '/etc/sfused.conf', self._mount_base)


class VZStorageRemoteFSClient(RemoteFsClient):
    def _vzstorage_write_mds_list(self, cluster_name, mdss):
        tmp_dir = tempfile.mkdtemp(prefix='vzstorage-')
        tmp_bs_path = os.path.join(tmp_dir, 'bs_list')
        with open(tmp_bs_path, 'w') as f:
            for mds in mdss:
                f.write(mds + "\n")
        conf_dir = os.path.join('/etc/pstorage/clusters', cluster_name)
        if os.path.exists(conf_dir):
            bs_path = os.path.join(conf_dir, 'bs_list')
            self._execute('cp', '-f', tmp_bs_path, bs_path,
                          root_helper=self._root_helper,
                          run_as_root=True)
        else:
            self._execute('cp', '-rf', tmp_dir, conf_dir,
                          root_helper=self._root_helper,
                          run_as_root=True)
        self._execute('chown', '-R', 'root:root', conf_dir,
                      root_helper=self._root_helper,
                      run_as_root=True)

    def _do_mount(self, mount_type, vz_share, mount_path,
                  mount_options=None, flags=None):
        m = re.search(r"(?:(\S+):\/)?([a-zA-Z0-9_-]+)(?::(\S+))?", vz_share)
        if not m:
            msg = (_("Invalid Virtuozzo Storage share specification: %r."
"Must be: [MDS1[,MDS2],...:/][:PASSWORD].") % vz_share) raise exception.BrickException(msg) mdss = m.group(1) cluster_name = m.group(2) passwd = m.group(3) if mdss: mdss = mdss.split(',') self._vzstorage_write_mds_list(cluster_name, mdss) if passwd: self._execute('pstorage', '-c', cluster_name, 'auth-node', '-P', process_input=passwd, root_helper=self._root_helper, run_as_root=True) mnt_cmd = ['pstorage-mount', '-c', cluster_name] if flags: mnt_cmd.extend(flags) mnt_cmd.extend([mount_path]) self._execute(*mnt_cmd, root_helper=self._root_helper, run_as_root=True, check_exit_code=0) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/remotefs/windows_remotefs.py0000664000175000017500000001303700000000000023010 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Windows remote filesystem client utilities.""" import os import re import warnings from os_win import utilsfactory from oslo_log import log as logging from os_brick import exception from os_brick.i18n import _ from os_brick.remotefs import remotefs LOG = logging.getLogger(__name__) class WindowsRemoteFsClient(remotefs.RemoteFsClient): _username_regex = re.compile(r'user(?:name)?=([^, ]+)') _password_regex = re.compile(r'pass(?:word)?=([^, ]+)') _loopback_share_map = {} def __init__(self, mount_type, root_helper=None, execute=None, *args, **kwargs): warnings.warn('Support for Windows OS has been deprecated.', category=DeprecationWarning, stacklevel=2) mount_type_to_option_prefix = { 'cifs': 'smbfs', 'smbfs': 'smbfs', } self._local_path_for_loopback = kwargs.get('local_path_for_loopback', True) if mount_type not in mount_type_to_option_prefix: raise exception.ProtocolNotSupported(protocol=mount_type) self._mount_type = mount_type option_prefix = mount_type_to_option_prefix[mount_type] self._mount_base = kwargs.get(option_prefix + '_mount_point_base') self._mount_options = kwargs.get(option_prefix + '_mount_options') self._smbutils = utilsfactory.get_smbutils() self._pathutils = utilsfactory.get_pathutils() def get_local_share_path(self, share, expect_existing=True): share = self._get_share_norm_path(share) share_name = self.get_share_name(share) share_subdir = self.get_share_subdir(share) is_local_share = self._smbutils.is_local_share(share) if not is_local_share: LOG.debug("Share '%s' is not exposed by the current host.", share) local_share_path = None else: local_share_path = self._smbutils.get_smb_share_path(share_name) if not local_share_path and expect_existing: err_msg = _("Could not find the local " "share path for %(share)s.") raise exception.VolumePathsNotFound(err_msg % dict(share=share)) if local_share_path and share_subdir: local_share_path = os.path.join(local_share_path, share_subdir) return local_share_path def _get_share_norm_path(self, share): return share.replace('/', '\\') def get_share_name(self, share): return 
self._get_share_norm_path(share).lstrip('\\').split('\\')[1] def get_share_subdir(self, share): return "\\".join( self._get_share_norm_path(share).lstrip('\\').split('\\')[2:]) def mount(self, share, flags=None): share_norm_path = self._get_share_norm_path(share) use_local_path = (self._local_path_for_loopback and self._smbutils.is_local_share(share_norm_path)) if use_local_path: LOG.info("Skipping mounting local share %(share_path)s.", dict(share_path=share_norm_path)) else: mount_options = " ".join( [self._mount_options or '', flags or '']) username, password = self._parse_credentials(mount_options) if not self._smbutils.check_smb_mapping( share_norm_path): self._smbutils.mount_smb_share(share_norm_path, username=username, password=password) if self._mount_base: self._create_mount_point(share, use_local_path) def unmount(self, share): self._smbutils.unmount_smb_share(self._get_share_norm_path(share)) def _create_mount_point(self, share, use_local_path): # The mount point will contain a hash of the share so we're # intentionally preserving the original share path as this is # what the caller will expect. mnt_point = self.get_mount_point(share) share_norm_path = self._get_share_norm_path(share) symlink_dest = (share_norm_path if not use_local_path else self.get_local_share_path(share)) if not os.path.isdir(self._mount_base): os.makedirs(self._mount_base) if os.path.exists(mnt_point): if not self._pathutils.is_symlink(mnt_point): raise exception.BrickException(_("Link path already exists " "and it's not a symlink")) else: self._pathutils.create_sym_link(mnt_point, symlink_dest) def _parse_credentials(self, opts_str): if not opts_str: return None, None match = self._username_regex.findall(opts_str) username = match[0] if match and match[0] != 'guest' else None match = self._password_regex.findall(opts_str) password = match[0] if match else None return username, password ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.615759 os_brick-6.11.0/os_brick/tests/0000775000175000017500000000000000000000000016352 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/__init__.py0000664000175000017500000000000000000000000020451 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/base.py0000664000175000017500000001130000000000000017631 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
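# Illustrative (hypothetical) example of how the helpers defined below are
# typically used by the unit tests in this package; FakeThing and its
# attribute name are made up:
#
#   class FakeThingTestCase(TestCase):
#       def test_run(self):
#           mock_exec = self.mock_object(FakeThing, '_execute',
#                                        return_value=('out', 'err'))
#           ...
#           mock_exec.assert_called_once()  # the patch is undone
#                                           # automatically via addCleanup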
import logging import os from unittest import mock import fixtures from oslo_concurrency import lockutils from oslo_config import fixture as config_fixture from oslo_utils import strutils import testtools from os_brick.initiator.connectors import nvmeof class TestCase(testtools.TestCase): """Test case base class for all unit tests.""" SENTINEL = object() def setUp(self): """Run before each test method to initialize test environment.""" super(TestCase, self).setUp() test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) try: test_timeout = int(test_timeout) except ValueError: # If timeout value is invalid do not set a timeout. test_timeout = 0 if test_timeout > 0: self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) self.useFixture(fixtures.NestedTempfile()) self.useFixture(fixtures.TempHomeDir()) environ_enabled = (lambda var_name: strutils.bool_from_string(os.environ.get(var_name))) if environ_enabled('OS_STDOUT_CAPTURE'): stdout = self.useFixture(fixtures.StringStream('stdout')).stream self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) if environ_enabled('OS_STDERR_CAPTURE'): stderr = self.useFixture(fixtures.StringStream('stderr')).stream self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) if environ_enabled('OS_LOG_CAPTURE'): log_format = '%(levelname)s [%(name)s] %(message)s' if environ_enabled('OS_DEBUG'): level = logging.DEBUG else: level = logging.INFO self.useFixture(fixtures.LoggerFixture(nuke_handlers=False, format=log_format, level=level)) # Protect against any case where someone doesn't remember to patch a # retry decorated call patcher = mock.patch('os_brick.utils._time_sleep') patcher.start() self.addCleanup(patcher.stop) # At runtime this would be set by the library user: Cinder, Nova, etc. self.useFixture(fixtures.NestedTempfile()) lock_path = self.useFixture(fixtures.TempDir()).path self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) self.fixture.config(lock_path=lock_path, group='oslo_concurrency') lockutils.set_defaults(lock_path) # Regardless of the system force no native NVMe-oF multipathing nvmeof.NVMeOFConnector.native_multipath_supported = False def _common_cleanup(self): """Runs after each test method to tear down test environment.""" # Stop any timers for x in self.injected: try: x.stop() except AssertionError: pass # Delete attributes that don't start with _ so they don't pin # memory around unnecessarily for the duration of the test # suite for key in [k for k in self.__dict__.keys() if k[0] != '_']: del self.__dict__[key] def log_level(self, level): """Set logging level to the specified value.""" log_root = logging.getLogger(None).logger log_root.setLevel(level) def mock_object(self, obj, attr_name, new_attr=SENTINEL, **kwargs): """Use python mock to mock an object attribute Mocks the specified objects attribute with the given value. Automatically performs 'addCleanup' for the mock. 
""" args = [obj, attr_name] if new_attr is not self.SENTINEL: args.append(new_attr) patcher = mock.patch.object(*args, **kwargs) mocked = patcher.start() self.addCleanup(patcher.stop) return mocked def patch(self, path, *args, **kwargs): """Use python mock to mock a path with automatic cleanup.""" patcher = mock.patch(path, *args, **kwargs) result = patcher.start() self.addCleanup(patcher.stop) return result ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.619759 os_brick-6.11.0/os_brick/tests/caches/0000775000175000017500000000000000000000000017600 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/caches/__init__.py0000664000175000017500000000000000000000000021677 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/caches/test_init.py0000664000175000017500000001142300000000000022155 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from os_brick import caches from os_brick import exception from os_brick.tests import base class CacheManagerTestCase(base.TestCase): def setUp(self): super(CacheManagerTestCase, self).setUp() self.connection_info = { "data": { "device_path": "/dev/disk/by-path/" "ip-192.0.2.0:3260-iscsi-iqn.2010-10.org.openstack" ":volume-fake_uuid-lun-1", }, } self.root_helper = None @mock.patch('os_brick.executor.Executor._execute') def test_init_invalid_device_path(self, moc_exec): conn_info_invalid = { 'data': { } } self.assertRaises( exception.VolumeLocalCacheNotSupported, caches.CacheManager, root_helper=None, connection_info=conn_info_invalid ) @mock.patch('os_brick.caches.CacheManager._get_engine') def test_init_cacheable(self, moc_get_engine): moc_get_engine.return_value = None conn_info_cacheable = { 'data': { 'device_path': '/dev/sdd', 'cacheable': True } } conn_info_non_cacheable = { 'data': { 'device_path': '/dev/sdd', } } mgr_cacheable = caches.CacheManager( root_helper=None, connection_info=conn_info_cacheable) mgr_non_cacheable = caches.CacheManager( root_helper=None, connection_info=conn_info_non_cacheable) self.assertTrue(mgr_cacheable.cacheable) self.assertFalse(mgr_non_cacheable.cacheable) @mock.patch('os_brick.caches.opencas.OpenCASEngine.is_engine_ready') def test_get_engine(self, moc_get_engine): conn_info = { 'data': { 'device_path': '/dev/sdd', 'cacheable': True } } mgr = caches.CacheManager(root_helper=None, cache_name='opencas', connection_info=conn_info) self.assertIsNotNone(mgr.engine) self.assertRaises( exception.Invalid, caches.CacheManager, root_helper=None, connection_info=conn_info ) @mock.patch('os_brick.caches.opencas.OpenCASEngine.is_engine_ready') @mock.patch('os_brick.caches.opencas.OpenCASEngine.attach_volume') def 
test_attach_volume(self, moc_attach, moc_eng_ready): conn_info = { 'data': { 'device_path': '/dev/sdd', } } moc_attach.return_value = '/dev/cas1-1' moc_eng_ready.return_value = True mgr = caches.CacheManager(root_helper=None, cache_name='opencas', connection_info=conn_info) self.assertEqual('/dev/sdd', mgr.attach_volume()) conn_info['data']['cacheable'] = True mgr = caches.CacheManager(root_helper=None, cache_name='opencas', connection_info=conn_info) self.assertEqual('/dev/cas1-1', mgr.attach_volume()) @mock.patch('os_brick.caches.opencas.OpenCASEngine.is_engine_ready') @mock.patch('os_brick.caches.opencas.OpenCASEngine.detach_volume') def test_detach_volume(self, moc_detach, moc_eng_ready): conn_info = { 'data': { 'device_path': '/dev/sdd', } } moc_detach.return_value = '/dev/sdd' moc_eng_ready.return_value = True # cacheable == False mgr = caches.CacheManager(root_helper=None, cache_name='opencas', connection_info=conn_info) self.assertEqual('/dev/sdd', mgr.attach_volume()) # cacheable == True conn_info['data']['cacheable'] = True mgr = caches.CacheManager(root_helper=None, cache_name='opencas', connection_info=conn_info) self.assertEqual('/dev/sdd', mgr.detach_volume()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/caches/test_opencas.py0000664000175000017500000001505400000000000022646 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
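# The tests below drive the OpenCAS engine with canned `casadm -L` output.
# An attached core is represented by fixture text of the following form
# (taken from the fixtures in this module):
#
#   type id disk status write policy device
#   cache 1 /dev/nvme0n1 Running wt -
#   └core 1 /dev/sdd Active - /dev/cas1-1
#
# where /dev/sdd is the backing volume and /dev/cas1-1 the cached device.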
from unittest import mock from oslo_concurrency import processutils as putils from os_brick.caches import opencas from os_brick import exception from os_brick.tests import base class OpenCASEngineTestCase(base.TestCase): def setUp(self): super(OpenCASEngineTestCase, self).setUp() self.connection_info = { "data": { "device_path": "/dev/disk/by-path/" "ip-192.0.2.0:3260-iscsi-iqn.2010-10.org.openstack" ":volume-fake_uuid-lun-1", }, } self.root_helper = None @mock.patch('os_brick.executor.Executor._execute') def test_os_execute_exception(self, mock_execute): raise_err = [ putils.ProcessExecutionError(exit_code=1), mock.DEFAULT, ] engine = opencas.OpenCASEngine(root_helper=None, opencas_cache_id=1) mock_execute.side_effect = raise_err self.assertRaises(putils.ProcessExecutionError, engine.os_execute, 'cmd', 'param') mock_execute.side_effect = raise_err self.assertRaises(putils.ProcessExecutionError, engine.is_engine_ready) mock_execute.side_effect = raise_err self.assertRaises(putils.ProcessExecutionError, engine._get_mapped_casdev, 'path') mock_execute.side_effect = raise_err self.assertRaises(putils.ProcessExecutionError, engine._get_mapped_coredev, 'path') mock_execute.side_effect = raise_err self.assertRaises(putils.ProcessExecutionError, engine._map_casdisk, 'path') mock_execute.side_effect = raise_err self.assertRaises(putils.ProcessExecutionError, engine._unmap_casdisk, 'path') @mock.patch('os_brick.executor.Executor._execute') def test_is_engine_ready(self, moc_exec): out_ready = """type id disk status write policy device cache 1 /dev/nvme0n1 Running wt -""" out_not_ready = 'type id disk status write policy device' err = '' engine = opencas.OpenCASEngine(root_helper=None, opencas_cache_id=1) moc_exec.return_value = (out_ready, err) ret = engine.is_engine_ready() self.assertTrue(ret) moc_exec.return_value = (out_not_ready, err) ret = engine.is_engine_ready() self.assertFalse(ret) moc_exec.assert_has_calls([ mock.call('casadm', '-L', run_as_root=True, root_helper=None) ]) @mock.patch('os_brick.executor.Executor._execute') def test_get_mapped_casdev(self, moc_exec): out_ready = """type id disk status write policy device cache 1 /dev/nvme0n1 Running wt - └core 1 /dev/sdd Active - /dev/cas1-1""" err = '' engine = opencas.OpenCASEngine(root_helper=None, opencas_cache_id=1) moc_exec.return_value = (out_ready, err) ret1 = engine._get_mapped_casdev('/dev/sdd') self.assertEqual('/dev/cas1-1', ret1) @mock.patch('os_brick.executor.Executor._execute') def test_get_mapped_coredev(self, moc_exec): out_ready = """type id disk status write policy device cache 1 /dev/nvme0n1 Running wt - └core 1 /dev/sdd Active - /dev/cas1-1""" err = '' engine = opencas.OpenCASEngine(root_helper=None, opencas_cache_id=1) moc_exec.return_value = (out_ready, err) ret1, ret2 = engine._get_mapped_coredev('/dev/cas1-1') self.assertEqual('1', ret1) self.assertEqual('/dev/sdd', ret2) @mock.patch('os_brick.executor.Executor._execute') @mock.patch('os_brick.caches.opencas.OpenCASEngine._get_mapped_casdev') def test_map_casdisk(self, moc_get_mapped_casdev, moc_exec): engine = opencas.OpenCASEngine(root_helper=None, opencas_cache_id=1) moc_get_mapped_casdev.return_value = '' moc_exec.return_value = ('', '') engine._map_casdisk('/dev/sdd') moc_exec.assert_has_calls([ mock.call('casadm', '-A', '-i', 1, '-d', '/dev/sdd', run_as_root=True, root_helper=None) ]) @mock.patch('os_brick.executor.Executor._execute') def test_unmap_casdisk(self, moc_exec): engine = opencas.OpenCASEngine(root_helper=None, opencas_cache_id=1) 
moc_exec.return_value = ('', '') engine._unmap_casdisk('1') moc_exec.assert_has_calls([ mock.call('casadm', '-R', '-f', '-i', 1, '-j', '1', run_as_root=True, root_helper=None) ]) @mock.patch('os_brick.caches.opencas.OpenCASEngine._map_casdisk') def test_attach_volume(self, moc_map): engine = opencas.OpenCASEngine(root_helper=None, opencas_cache_id=1) moc_map.return_value = '' args = {'no_dev_path': 'path'} self.assertRaises(exception.VolumePathsNotFound, engine.attach_volume, **args) self.assertRaises(exception.VolumePathsNotFound, engine.attach_volume) # No exception if dev_path set correctly args = {'dev_path': 'path'} engine.attach_volume(**args) @mock.patch('os_brick.executor.Executor._execute') def test_detach_volume(self, moc_exec): out_ready = """type id disk status write policy device cache 1 /dev/nvme0n1 Running wt - └core 1 /dev/sdd Active - /dev/cas1-1""" err = '' engine = opencas.OpenCASEngine(root_helper=None, opencas_cache_id=1) moc_exec.return_value = (out_ready, err) args = {'no_dev_path': 'path'} self.assertRaises(exception.VolumePathsNotFound, engine.detach_volume, **args) self.assertRaises(exception.VolumePathsNotFound, engine.detach_volume) # No exception if dev_path set correctly args = {'dev_path': '/dev/cas1-1'} engine.detach_volume(**args) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.619759 os_brick-6.11.0/os_brick/tests/encryptors/0000775000175000017500000000000000000000000020562 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/encryptors/__init__.py0000664000175000017500000000000000000000000022661 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/encryptors/test_base.py0000664000175000017500000002436100000000000023113 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
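# Illustrative example (values made up) of what the fake key helpers defined
# below produce: fake__get_key_symmetric(b'0123abcd') returns a castellan
# SymmetricKey with algorithm 'AES' whose raw bytes are unhexlify('0123abcd'),
# i.e. 4 bytes, reported as 4 * 8 = 32 bits.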
import binascii from unittest import mock from castellan.common import objects as castellan_objects from castellan.tests.unit.key_manager import fake from os_brick import encryptors from os_brick.tests import base def fake__get_key_symmetric(passphrase): raw = bytes(binascii.unhexlify(passphrase)) symmetric_key = castellan_objects.symmetric_key.SymmetricKey( 'AES', len(raw) * 8, raw) return symmetric_key def fake__get_key_passphrase(passphrase): raw = passphrase.encode('utf-8') passphrase_key = castellan_objects.passphrase.Passphrase(raw) return passphrase_key class BaseVolumeEncryptor(encryptors.base.VolumeEncryptor): def attach_volume(self, context, **kwargs): pass def detach_volume(self, **kwargs): pass def extend_volume(self, context, **kwargs): pass class VolumeEncryptorTestCase(base.TestCase): def _create(self): pass def setUp(self): super(VolumeEncryptorTestCase, self).setUp() self.connection_info = { "data": { "device_path": "/dev/disk/by-path/" "ip-192.0.2.0:3260-iscsi-iqn.2010-10.org.openstack" ":volume-fake_uuid-lun-1", }, } self.root_helper = None self.keymgr = fake.fake_api() self.encryptor = self._create() class BaseEncryptorTestCase(VolumeEncryptorTestCase): def _test_get_encryptor(self, provider, expected_provider_class): encryption = {'control_location': 'front-end', 'provider': provider} encryptor = encryptors.get_volume_encryptor( root_helper=self.root_helper, connection_info=self.connection_info, keymgr=self.keymgr, **encryption) self.assertIsInstance(encryptor, expected_provider_class) def test_get_encryptors(self): self._test_get_encryptor('luks', encryptors.luks.LuksEncryptor) # TODO(lyarwood): Remove the following in Pike self._test_get_encryptor('LuksEncryptor', encryptors.luks.LuksEncryptor) self._test_get_encryptor('os_brick.encryptors.luks.LuksEncryptor', encryptors.luks.LuksEncryptor) self._test_get_encryptor('nova.volume.encryptors.luks.LuksEncryptor', encryptors.luks.LuksEncryptor) self._test_get_encryptor('plain', encryptors.cryptsetup.CryptsetupEncryptor) # TODO(lyarwood): Remove the following in Pike self._test_get_encryptor('CryptsetupEncryptor', encryptors.cryptsetup.CryptsetupEncryptor) self._test_get_encryptor( 'os_brick.encryptors.cryptsetup.CryptsetupEncryptor', encryptors.cryptsetup.CryptsetupEncryptor) self._test_get_encryptor( 'nova.volume.encryptors.cryptsetup.CryptsetupEncryptor', encryptors.cryptsetup.CryptsetupEncryptor) self._test_get_encryptor(None, encryptors.nop.NoOpEncryptor) # TODO(lyarwood): Remove the following in Pike self._test_get_encryptor('NoOpEncryptor', encryptors.nop.NoOpEncryptor) self._test_get_encryptor('os_brick.encryptors.nop.NoOpEncryptor', encryptors.nop.NoOpEncryptor) self._test_get_encryptor('nova.volume.encryptors.nop.NoopEncryptor', encryptors.nop.NoOpEncryptor) @mock.patch('os_brick.encryptors.base.VolumeEncryptor._get_key') def test__get_encryption_key_as_passphrase_hexlify(self, mock_key): """Test passphrase retrieval for secret type 'symmetric'. This should hexlify the secret in _get_encryption_key_as_passphrase. 
""" base_enc = BaseVolumeEncryptor( root_helper=self.root_helper, connection_info=self.connection_info, keymgr=self.keymgr ) fake_key_plain = 'passphrase-in-clear-text' fake_key_hexlified = binascii.hexlify(fake_key_plain.encode('utf-8')) mock_key.return_value = fake__get_key_symmetric(fake_key_hexlified) passphrase = base_enc._get_encryption_key_as_passphrase( mock.sentinel.context) mock_key.assert_called_once_with(mock.sentinel.context) self.assertEqual(passphrase, fake_key_hexlified.decode('utf-8')) @mock.patch('os_brick.encryptors.base.VolumeEncryptor._get_key') def test__get_encryption_key_as_passphrase(self, mock_key): """Test passphrase retrieval for secret type 'passphrase'. This should skip the hexlify step in _get_encryption_key_as_passphrase. """ base_enc = BaseVolumeEncryptor( root_helper=self.root_helper, connection_info=self.connection_info, keymgr=self.keymgr ) fake_key_plain = 'passphrase-in-clear-text' mock_key.return_value = fake__get_key_passphrase(fake_key_plain) passphrase = base_enc._get_encryption_key_as_passphrase( mock.sentinel.context) mock_key.assert_called_once_with(mock.sentinel.context) self.assertEqual(passphrase, fake_key_plain) def test_get_error_encryptors(self): encryption = {'control_location': 'front-end', 'provider': 'ErrorEncryptor'} self.assertRaises(ValueError, encryptors.get_volume_encryptor, root_helper=self.root_helper, connection_info=self.connection_info, keymgr=self.keymgr, **encryption) @mock.patch('os_brick.encryptors.LOG') def test_error_log(self, log): encryption = {'control_location': 'front-end', 'provider': 'TestEncryptor'} provider = 'TestEncryptor' try: encryptors.get_volume_encryptor( root_helper=self.root_helper, connection_info=self.connection_info, keymgr=self.keymgr, **encryption) except Exception as e: log.error.assert_called_once_with("Error instantiating " "%(provider)s: " "%(exception)s", {'provider': provider, 'exception': e}) @mock.patch('os_brick.encryptors.LOG') def test_get_missing_out_of_tree_encryptor_log(self, log): provider = 'TestEncryptor' encryption = {'control_location': 'front-end', 'provider': provider} try: encryptors.get_volume_encryptor( root_helper=self.root_helper, connection_info=self.connection_info, keymgr=self.keymgr, **encryption) except Exception as e: log.error.assert_called_once_with("Error instantiating " "%(provider)s: " "%(exception)s", {'provider': provider, 'exception': e}) log.warning.assert_called_once_with("Use of the out of tree " "encryptor class %(provider)s " "will be blocked with the " "Queens release of os-brick.", {'provider': provider}) @mock.patch('os_brick.encryptors.LOG') def test_get_direct_encryptor_log(self, log): encryption = {'control_location': 'front-end', 'provider': 'LuksEncryptor'} encryptors.get_volume_encryptor( root_helper=self.root_helper, connection_info=self.connection_info, keymgr=self.keymgr, **encryption) encryption = {'control_location': 'front-end', 'provider': 'os_brick.encryptors.luks.LuksEncryptor'} encryptors.get_volume_encryptor( root_helper=self.root_helper, connection_info=self.connection_info, keymgr=self.keymgr, **encryption) encryption = {'control_location': 'front-end', 'provider': 'nova.volume.encryptors.luks.LuksEncryptor'} encryptors.get_volume_encryptor( root_helper=self.root_helper, connection_info=self.connection_info, keymgr=self.keymgr, **encryption) log.warning.assert_has_calls([ mock.call("Use of the in tree encryptor class %(provider)s by " "directly referencing the implementation class will be " "blocked in the Queens release of 
os-brick.", {'provider': 'LuksEncryptor'}), mock.call("Use of the in tree encryptor class %(provider)s by " "directly referencing the implementation class will be " "blocked in the Queens release of os-brick.", {'provider': 'os_brick.encryptors.luks.LuksEncryptor'}), mock.call("Use of the in tree encryptor class %(provider)s by " "directly referencing the implementation class will be " "blocked in the Queens release of os-brick.", {'provider': 'nova.volume.encryptors.luks.LuksEncryptor'})]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/encryptors/test_cryptsetup.py0000664000175000017500000001527600000000000024430 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import binascii import copy from unittest import mock from castellan.common.objects import symmetric_key as key from castellan.tests.unit.key_manager import fake from os_brick.encryptors import cryptsetup from os_brick import exception from os_brick.tests.encryptors import test_base def fake__get_key(context, passphrase): raw = bytes(binascii.unhexlify(passphrase)) symmetric_key = key.SymmetricKey('AES', len(raw) * 8, raw) return symmetric_key class CryptsetupEncryptorTestCase(test_base.VolumeEncryptorTestCase): @mock.patch('os.path.exists', return_value=False) def _create(self, mock_exists): return cryptsetup.CryptsetupEncryptor( connection_info=self.connection_info, root_helper=self.root_helper, keymgr=self.keymgr) def setUp(self): super(CryptsetupEncryptorTestCase, self).setUp() self.dev_path = self.connection_info['data']['device_path'] self.dev_name = 'crypt-%s' % self.dev_path.split('/')[-1] self.symlink_path = self.dev_path @mock.patch('os_brick.executor.Executor._execute') def test__open_volume(self, mock_execute): self.encryptor._open_volume("passphrase") mock_execute.assert_has_calls([ mock.call('cryptsetup', 'create', '--key-file=-', self.dev_name, self.dev_path, process_input='passphrase', run_as_root=True, root_helper=self.root_helper, check_exit_code=True), ]) @mock.patch('os_brick.executor.Executor._execute') def test_attach_volume(self, mock_execute): fake_key = 'e8b76872e3b04c18b3b6656bbf6f5089' self.encryptor._get_key = mock.MagicMock() self.encryptor._get_key.return_value = fake__get_key(None, fake_key) self.encryptor.attach_volume(None) mock_execute.assert_has_calls([ mock.call('cryptsetup', 'create', '--key-file=-', self.dev_name, self.dev_path, process_input=fake_key, root_helper=self.root_helper, run_as_root=True, check_exit_code=True), mock.call('ln', '--symbolic', '--force', '/dev/mapper/%s' % self.dev_name, self.symlink_path, root_helper=self.root_helper, run_as_root=True, check_exit_code=True), ]) @mock.patch('os_brick.executor.Executor._execute') def test__close_volume(self, mock_execute): self.encryptor.detach_volume() mock_execute.assert_has_calls([ mock.call('cryptsetup', 'remove', self.dev_name, 
root_helper=self.root_helper, run_as_root=True, check_exit_code=[0, 4]), ]) @mock.patch('os_brick.executor.Executor._execute') def test_detach_volume(self, mock_execute): self.encryptor.detach_volume() mock_execute.assert_has_calls([ mock.call('cryptsetup', 'remove', self.dev_name, root_helper=self.root_helper, run_as_root=True, check_exit_code=[0, 4]), ]) def test_init_volume_encryption_not_supported(self): # Tests that creating a CryptsetupEncryptor fails if there is no # device_path key. type = 'unencryptable' data = dict(volume_id='a194699b-aa07-4433-a945-a5d23802043e') connection_info = dict(driver_volume_type=type, data=data) exc = self.assertRaises(exception.VolumeEncryptionNotSupported, cryptsetup.CryptsetupEncryptor, root_helper=self.root_helper, connection_info=connection_info, keymgr=fake.fake_api()) self.assertIn(type, str(exc)) @mock.patch('os_brick.executor.Executor._execute') @mock.patch('os.path.exists', return_value=True) def test_init_volume_encryption_with_old_name(self, mock_exists, mock_execute): # If an old name crypt device exists, dev_path should be the old name. old_dev_name = self.dev_path.split('/')[-1] encryptor = cryptsetup.CryptsetupEncryptor( root_helper=self.root_helper, connection_info=self.connection_info, keymgr=self.keymgr) self.assertFalse(encryptor.dev_name.startswith('crypt-')) self.assertEqual(old_dev_name, encryptor.dev_name) self.assertEqual(self.dev_path, encryptor.dev_path) self.assertEqual(self.symlink_path, encryptor.symlink_path) mock_exists.assert_called_once_with('/dev/mapper/%s' % old_dev_name) mock_execute.assert_called_once_with( 'cryptsetup', 'status', old_dev_name, run_as_root=True) @mock.patch('os_brick.executor.Executor._execute') @mock.patch('os.path.exists', side_effect=[False, True]) def test_init_volume_encryption_with_wwn(self, mock_exists, mock_execute): # If an wwn name crypt device exists, dev_path should be based on wwn. old_dev_name = self.dev_path.split('/')[-1] wwn = 'fake_wwn' connection_info = copy.deepcopy(self.connection_info) connection_info['data']['multipath_id'] = wwn encryptor = cryptsetup.CryptsetupEncryptor( root_helper=self.root_helper, connection_info=connection_info, keymgr=fake.fake_api(), execute=mock_execute) self.assertFalse(encryptor.dev_name.startswith('crypt-')) self.assertEqual(wwn, encryptor.dev_name) self.assertEqual(self.dev_path, encryptor.dev_path) self.assertEqual(self.symlink_path, encryptor.symlink_path) mock_exists.assert_has_calls([ mock.call('/dev/mapper/%s' % old_dev_name), mock.call('/dev/mapper/%s' % wwn)]) mock_execute.assert_called_once_with( 'cryptsetup', 'status', wwn, run_as_root=True) def test_extend_volume(self): self.assertRaises(NotImplementedError, self.encryptor.extend_volume, mock.sentinel.context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/encryptors/test_luks.py0000664000175000017500000003477600000000000023172 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import binascii import copy from unittest import mock from castellan.common.objects import symmetric_key as key from castellan.tests.unit.key_manager import fake from oslo_concurrency import processutils as putils from os_brick.encryptors import luks from os_brick import exception from os_brick.tests.encryptors import test_base def fake__get_key(context, passphrase): raw = bytes(binascii.unhexlify(passphrase)) symmetric_key = key.SymmetricKey('AES', len(raw) * 8, raw) return symmetric_key class LuksEncryptorTestCase(test_base.VolumeEncryptorTestCase): def setUp(self): super().setUp() self.dev_path = self.connection_info['data']['device_path'] self.dev_name = 'crypt-%s' % self.dev_path.split('/')[-1] self.symlink_path = self.dev_path def _create(self): return luks.LuksEncryptor(root_helper=self.root_helper, connection_info=self.connection_info, keymgr=self.keymgr) @mock.patch('os_brick.executor.Executor._execute') def test_is_luks(self, mock_execute): luks.is_luks(self.root_helper, self.dev_path, execute=mock_execute) mock_execute.assert_has_calls([ mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path, run_as_root=True, root_helper=self.root_helper, check_exit_code=True), ], any_order=False) @mock.patch('os_brick.executor.Executor._execute') @mock.patch('os_brick.encryptors.luks.LOG') def test_is_luks_with_error(self, mock_log, mock_execute): error_msg = "Device %s is not a valid LUKS device." % self.dev_path mock_execute.side_effect = putils.ProcessExecutionError( exit_code=1, stderr=error_msg) luks.is_luks(self.root_helper, self.dev_path, execute=mock_execute) mock_execute.assert_has_calls([ mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path, run_as_root=True, root_helper=self.root_helper, check_exit_code=True), ]) self.assertEqual(1, mock_log.warning.call_count) # warning logged @mock.patch('os_brick.executor.Executor._execute') def test__format_volume(self, mock_execute): self.encryptor._format_volume("passphrase") mock_execute.assert_has_calls([ mock.call('cryptsetup', '--batch-mode', 'luksFormat', '--type', 'luks1', '--key-file=-', self.dev_path, process_input='passphrase', root_helper=self.root_helper, run_as_root=True, check_exit_code=True, attempts=3), ]) @mock.patch('os_brick.executor.Executor._execute') def test__open_volume(self, mock_execute): self.encryptor._open_volume("passphrase") mock_execute.assert_has_calls([ mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path, self.dev_name, process_input='passphrase', root_helper=self.root_helper, run_as_root=True, check_exit_code=True), ]) @mock.patch('os_brick.executor.Executor._execute') def test_attach_volume(self, mock_execute): fake_key = '0c84146034e747639b698368807286df' self.encryptor._get_key = mock.MagicMock() self.encryptor._get_key.return_value = fake__get_key(None, fake_key) self.encryptor.attach_volume(None) mock_execute.assert_has_calls([ mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path, self.dev_name, process_input=fake_key, root_helper=self.root_helper, run_as_root=True, check_exit_code=True), mock.call('ln', '--symbolic', '--force', '/dev/mapper/%s' % self.dev_name, self.symlink_path, root_helper=self.root_helper, run_as_root=True, check_exit_code=True), ]) @mock.patch('os_brick.executor.Executor._execute') def test_attach_volume_not_formatted(self, mock_execute): fake_key = 'bc37c5eccebe403f9cc2d0dd20dac2bc' self.encryptor._get_key = mock.MagicMock() 
self.encryptor._get_key.return_value = fake__get_key(None, fake_key) mock_execute.side_effect = [ putils.ProcessExecutionError(exit_code=1), # luksOpen putils.ProcessExecutionError(exit_code=1), # isLuks mock.DEFAULT, # luksFormat mock.DEFAULT, # luksOpen mock.DEFAULT, # ln ] self.encryptor.attach_volume(None) mock_execute.assert_has_calls([ mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path, self.dev_name, process_input=fake_key, root_helper=self.root_helper, run_as_root=True, check_exit_code=True), mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path, root_helper=self.root_helper, run_as_root=True, check_exit_code=True), mock.call('cryptsetup', '--batch-mode', 'luksFormat', '--type', 'luks1', '--key-file=-', self.dev_path, process_input=fake_key, root_helper=self.root_helper, run_as_root=True, check_exit_code=True, attempts=3), mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path, self.dev_name, process_input=fake_key, root_helper=self.root_helper, run_as_root=True, check_exit_code=True), mock.call('ln', '--symbolic', '--force', '/dev/mapper/%s' % self.dev_name, self.symlink_path, root_helper=self.root_helper, run_as_root=True, check_exit_code=True), ], any_order=False) @mock.patch('os_brick.executor.Executor._execute') def test_attach_volume_fail(self, mock_execute): fake_key = 'ea6c2e1b8f7f4f84ae3560116d659ba2' self.encryptor._get_key = mock.MagicMock() self.encryptor._get_key.return_value = fake__get_key(None, fake_key) mock_execute.side_effect = [ putils.ProcessExecutionError(exit_code=1), # luksOpen mock.DEFAULT, # isLuks ] self.assertRaises(putils.ProcessExecutionError, self.encryptor.attach_volume, None) mock_execute.assert_has_calls([ mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path, self.dev_name, process_input=fake_key, root_helper=self.root_helper, run_as_root=True, check_exit_code=True), mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path, root_helper=self.root_helper, run_as_root=True, check_exit_code=True), ], any_order=False) @mock.patch('os_brick.executor.Executor._execute') def test__close_volume(self, mock_execute): self.encryptor.detach_volume() mock_execute.assert_has_calls([ mock.call('cryptsetup', 'luksClose', self.dev_name, root_helper=self.root_helper, attempts=3, run_as_root=True, check_exit_code=[0, 4]), ]) @mock.patch('os_brick.executor.Executor._execute') def test_detach_volume(self, mock_execute): self.encryptor.detach_volume() mock_execute.assert_has_calls([ mock.call('cryptsetup', 'luksClose', self.dev_name, root_helper=self.root_helper, attempts=3, run_as_root=True, check_exit_code=[0, 4]), ]) def test_init_volume_encryption_not_supported(self): # Tests that creating a CryptsetupEncryptor fails if there is no # device_path key. type = 'unencryptable' data = dict(volume_id='a194699b-aa07-4433-a945-a5d23802043e') connection_info = dict(driver_volume_type=type, data=data) exc = self.assertRaises(exception.VolumeEncryptionNotSupported, luks.LuksEncryptor, root_helper=self.root_helper, connection_info=connection_info, keymgr=fake.fake_api()) self.assertIn(type, str(exc)) @mock.patch('os_brick.executor.Executor._execute') @mock.patch('os.path.exists', return_value=True) def test_init_volume_encryption_with_old_name(self, mock_exists, mock_execute): # If an old name crypt device exists, dev_path should be the old name. 
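        # e.g. for the device_path fixture used by these tests the old-style
        # name is everything after the final '/', i.e. the
        # 'ip-...:volume-fake_uuid-lun-1' basename.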
old_dev_name = self.dev_path.split('/')[-1] encryptor = luks.LuksEncryptor( root_helper=self.root_helper, connection_info=self.connection_info, keymgr=self.keymgr) self.assertFalse(encryptor.dev_name.startswith('crypt-')) self.assertEqual(old_dev_name, encryptor.dev_name) self.assertEqual(self.dev_path, encryptor.dev_path) self.assertEqual(self.symlink_path, encryptor.symlink_path) mock_exists.assert_called_once_with('/dev/mapper/%s' % old_dev_name) mock_execute.assert_called_once_with( 'cryptsetup', 'status', old_dev_name, run_as_root=True) @mock.patch('os_brick.executor.Executor._execute') @mock.patch('os.path.exists', side_effect=[False, True]) def test_init_volume_encryption_with_wwn(self, mock_exists, mock_execute): # If a wwn-named crypt device exists, dev_path should be based on the wwn. old_dev_name = self.dev_path.split('/')[-1] wwn = 'fake_wwn' connection_info = copy.deepcopy(self.connection_info) connection_info['data']['multipath_id'] = wwn encryptor = luks.LuksEncryptor( root_helper=self.root_helper, connection_info=connection_info, keymgr=fake.fake_api()) self.assertFalse(encryptor.dev_name.startswith('crypt-')) self.assertEqual(wwn, encryptor.dev_name) self.assertEqual(self.dev_path, encryptor.dev_path) self.assertEqual(self.symlink_path, encryptor.symlink_path) mock_exists.assert_has_calls([ mock.call('/dev/mapper/%s' % old_dev_name), mock.call('/dev/mapper/%s' % wwn)]) mock_execute.assert_called_once_with( 'cryptsetup', 'status', wwn, run_as_root=True) @mock.patch('os_brick.utils.get_device_size') @mock.patch.object(luks.LuksEncryptor, '_execute') @mock.patch.object(luks.LuksEncryptor, '_get_encryption_key_as_passphrase') def test_extend_volume(self, mock_pass, mock_exec, mock_size): encryptor = self.encryptor res = encryptor.extend_volume(mock.sentinel.context) self.assertEqual(mock_size.return_value, res) mock_pass.assert_called_once_with(mock.sentinel.context) mock_exec.assert_called_once_with( 'cryptsetup', 'resize', encryptor.dev_path, process_input=mock_pass.return_value, run_as_root=True, check_exit_code=True, root_helper=encryptor._root_helper) mock_size.assert_called_once_with(encryptor, encryptor.dev_path) class Luks2EncryptorTestCase(LuksEncryptorTestCase): def _create(self): return luks.Luks2Encryptor(root_helper=self.root_helper, connection_info=self.connection_info, keymgr=self.keymgr) @mock.patch('os_brick.executor.Executor._execute') def test__format_volume(self, mock_execute): self.encryptor._format_volume("passphrase") mock_execute.assert_has_calls([ mock.call('cryptsetup', '--batch-mode', 'luksFormat', '--type', 'luks2', '--key-file=-', self.dev_path, process_input='passphrase', root_helper=self.root_helper, run_as_root=True, check_exit_code=True, attempts=3), ]) @mock.patch('os_brick.executor.Executor._execute') def test_attach_volume_not_formatted(self, mock_execute): fake_key = 'bc37c5eccebe403f9cc2d0dd20dac2bc' self.encryptor._get_key = mock.MagicMock() self.encryptor._get_key.return_value = fake__get_key(None, fake_key) mock_execute.side_effect = [ putils.ProcessExecutionError(exit_code=1), # luksOpen putils.ProcessExecutionError(exit_code=1), # isLuks mock.DEFAULT, # luksFormat mock.DEFAULT, # luksOpen mock.DEFAULT, # ln ] self.encryptor.attach_volume(None) mock_execute.assert_has_calls([ mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path, self.dev_name, process_input=fake_key, root_helper=self.root_helper, run_as_root=True, check_exit_code=True), mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path, root_helper=self.root_helper,
run_as_root=True, check_exit_code=True), mock.call('cryptsetup', '--batch-mode', 'luksFormat', '--type', 'luks2', '--key-file=-', self.dev_path, process_input=fake_key, root_helper=self.root_helper, run_as_root=True, check_exit_code=True, attempts=3), mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path, self.dev_name, process_input=fake_key, root_helper=self.root_helper, run_as_root=True, check_exit_code=True), mock.call('ln', '--symbolic', '--force', '/dev/mapper/%s' % self.dev_name, self.symlink_path, root_helper=self.root_helper, run_as_root=True, check_exit_code=True), ], any_order=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/encryptors/test_nop.py0000664000175000017500000000311300000000000022765 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from os_brick.encryptors import nop from os_brick.tests.encryptors import test_base class NoOpEncryptorTestCase(test_base.VolumeEncryptorTestCase): def _create(self): return nop.NoOpEncryptor(root_helper=self.root_helper, connection_info=self.connection_info, keymgr=self.keymgr) def test_attach_volume(self): test_args = { 'control_location': 'front-end', 'provider': 'NoOpEncryptor', } self.encryptor.attach_volume(None, **test_args) def test_detach_volume(self): test_args = { 'control_location': 'front-end', 'provider': 'NoOpEncryptor', } self.encryptor.detach_volume(**test_args) def test_extend_volume(self): # Test that it exists and doesn't break on call self.encryptor.extend_volume('context', anything=1, goes='asdf') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.619759 os_brick-6.11.0/os_brick/tests/initiator/0000775000175000017500000000000000000000000020354 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/initiator/__init__.py0000664000175000017500000000000000000000000022453 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.627759 os_brick-6.11.0/os_brick/tests/initiator/connectors/0000775000175000017500000000000000000000000022531 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/initiator/connectors/__init__.py0000664000175000017500000000000000000000000024630 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/initiator/connectors/test_base_iscsi.py0000664000175000017500000000762000000000000026253 0ustar00zuulzuul00000000000000# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from os_brick.initiator.connectors import base_iscsi from os_brick.initiator.connectors import fake from os_brick.tests import base as test_base class BaseISCSIConnectorTestCase(test_base.TestCase): def setUp(self): super(BaseISCSIConnectorTestCase, self).setUp() self.connector = fake.FakeBaseISCSIConnector(None) @mock.patch.object(base_iscsi.BaseISCSIConnector, '_get_all_targets') def test_iterate_all_targets(self, mock_get_all_targets): # extra_property cannot be a sentinel: a copied sentinel will not be # identical to the original one. connection_properties = { 'target_portals': mock.sentinel.target_portals, 'target_iqns': mock.sentinel.target_iqns, 'target_luns': mock.sentinel.target_luns, 'extra_property': 'extra_property'} mock_get_all_targets.return_value = [( mock.sentinel.portal, mock.sentinel.iqn, mock.sentinel.lun)] # method is a generator, and it yields dictionaries. list() will # iterate over all of the method's items. list_props = list( self.connector._iterate_all_targets(connection_properties)) mock_get_all_targets.assert_called_once_with(connection_properties) self.assertEqual(1, len(list_props)) expected_props = {'target_portal': mock.sentinel.portal, 'target_iqn': mock.sentinel.iqn, 'target_lun': mock.sentinel.lun, 'extra_property': 'extra_property'} self.assertEqual(expected_props, list_props[0]) def test_get_all_targets(self): portals = [mock.sentinel.portals1, mock.sentinel.portals2] iqns = [mock.sentinel.iqns1, mock.sentinel.iqns2] luns = [mock.sentinel.luns1, mock.sentinel.luns2] connection_properties = {'target_portals': portals, 'target_iqns': iqns, 'target_luns': luns} all_targets = self.connector._get_all_targets(connection_properties) expected_targets = zip(portals, iqns, luns) self.assertEqual(list(expected_targets), list(all_targets)) def test_get_all_targets_no_target_luns(self): portals = [mock.sentinel.portals1, mock.sentinel.portals2] iqns = [mock.sentinel.iqns1, mock.sentinel.iqns2] lun = mock.sentinel.luns connection_properties = {'target_portals': portals, 'target_iqns': iqns, 'target_lun': lun} all_targets = self.connector._get_all_targets(connection_properties) expected_targets = zip(portals, iqns, [lun, lun]) self.assertEqual(list(expected_targets), list(all_targets)) def test_get_all_targets_single_target(self): connection_properties = { 'target_portal': mock.sentinel.target_portal, 'target_iqn': mock.sentinel.target_iqn, 'target_lun': mock.sentinel.target_lun} all_targets = self.connector._get_all_targets(connection_properties) expected_target = (mock.sentinel.target_portal, mock.sentinel.target_iqn, mock.sentinel.target_lun) self.assertEqual([expected_target], all_targets) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/initiator/connectors/test_base_rbd.py0000664000175000017500000000610100000000000025701 0ustar00zuulzuul00000000000000# Copyright 2020 Cloudbase Solutions Srl # All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from os_brick.initiator.connectors import base_rbd from os_brick.tests import base # Both Linux and Windows tests are using those mocks. class RBDConnectorTestMixin(object): def setUp(self): super(RBDConnectorTestMixin, self).setUp() self.user = 'fake_user' self.pool = 'fake_pool' self.volume = 'fake_volume' self.clustername = 'fake_ceph' self.hosts = ['192.168.10.2'] self.ports = ['6789'] self.keyring = "[client.cinder]\n key = test\n" self.image_name = '%s/%s' % (self.pool, self.volume) self.connection_properties = { 'auth_username': self.user, 'name': self.image_name, 'cluster_name': self.clustername, 'hosts': self.hosts, 'ports': self.ports, 'keyring': self.keyring, } @ddt.ddt class TestRBDConnectorMixin(RBDConnectorTestMixin, base.TestCase): def setUp(self): super(TestRBDConnectorMixin, self).setUp() self._conn = base_rbd.RBDConnectorMixin() @ddt.data((['192.168.1.1', '192.168.1.2'], ['192.168.1.1', '192.168.1.2']), (['3ffe:1900:4545:3:200:f8ff:fe21:67cf', 'fe80:0:0:0:200:f8ff:fe21:67cf'], ['[3ffe:1900:4545:3:200:f8ff:fe21:67cf]', '[fe80:0:0:0:200:f8ff:fe21:67cf]']), (['foobar', 'fizzbuzz'], ['foobar', 'fizzbuzz']), (['192.168.1.1', '3ffe:1900:4545:3:200:f8ff:fe21:67cf', 'hello, world!'], ['192.168.1.1', '[3ffe:1900:4545:3:200:f8ff:fe21:67cf]', 'hello, world!'])) @ddt.unpack def test_sanitize_mon_host(self, hosts_in, hosts_out): self.assertEqual(hosts_out, self._conn._sanitize_mon_hosts(hosts_in)) def test_get_rbd_args(self): res = self._conn._get_rbd_args(self.connection_properties, None) expected = ['--id', self.user, '--mon_host', self.hosts[0] + ':' + self.ports[0]] self.assertEqual(expected, res) def test_get_rbd_args_with_conf(self): res = self._conn._get_rbd_args(self.connection_properties, mock.sentinel.conf_path) expected = ['--id', self.user, '--mon_host', self.hosts[0] + ':' + self.ports[0], '--conf', mock.sentinel.conf_path] self.assertEqual(expected, res) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/initiator/connectors/test_fibre_channel.py0000664000175000017500000014200400000000000026722 0ustar00zuulzuul00000000000000# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
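# --- Illustrative sketch, not part of the original suite -----------------
# The device strings asserted throughout this file follow the udev FC
# "by-path" naming convention; this hypothetical helper merely rebuilds
# that layout to make the expected values easier to read:
def _fc_by_path_example(pci_num, wwpn, lun, platform=None):
    # an optional platform component (e.g. 'platform-80040000000.peu0-c0')
    # precedes the PCI address
    prefix = '%s-' % platform if platform else ''
    return ('/dev/disk/by-path/%spci-%s-fc-0x%s-lun-%s'
            % (prefix, pci_num, wwpn, lun))
# _fc_by_path_example('0000:05:00.2', '1234567890123456', 1) ==
#     '/dev/disk/by-path/pci-0000:05:00.2-fc-0x1234567890123456-lun-1'
# --------------------------------------------------------------------------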
import os from unittest import mock import ddt from os_brick import exception from os_brick.initiator.connectors import base from os_brick.initiator.connectors import fibre_channel from os_brick.initiator import linuxfc from os_brick.initiator import linuxscsi from os_brick.tests.initiator import test_connector @ddt.ddt class FibreChannelConnectorTestCase(test_connector.ConnectorTestCase): def setUp(self): super(FibreChannelConnectorTestCase, self).setUp() self.connector = fibre_channel.FibreChannelConnector( None, execute=self.fake_execute, use_multipath=False) self.assertIsNotNone(self.connector) self.assertIsNotNone(self.connector._linuxfc) self.assertIsNotNone(self.connector._linuxscsi) def fake_get_fc_hbas(self): return [{'ClassDevice': 'host1', 'ClassDevicePath': '/sys/devices/pci0000:00/0000:00:03.0' '/0000:05:00.2/host1/fc_host/host1', 'dev_loss_tmo': '30', 'fabric_name': '0x1000000533f55566', 'issue_lip': '', 'max_npiv_vports': '255', 'maxframe_size': '2048 bytes', 'node_name': '0x200010604b019419', 'npiv_vports_inuse': '0', 'port_id': '0x680409', 'port_name': '0x100010604b019419', 'port_state': 'Online', 'port_type': 'NPort (fabric via point-to-point)', 'speed': '10 Gbit', 'supported_classes': 'Class 3', 'supported_speeds': '10 Gbit', 'symbolic_name': 'Emulex 554M FV4.0.493.0 DV8.3.27', 'tgtid_bind_type': 'wwpn (World Wide Port Name)', 'uevent': None, 'vport_create': '', 'vport_delete': ''}] def fake_get_fc_hbas_with_platform(self): return [{'ClassDevice': 'host1', 'ClassDevicePath': '/sys/devices/platform/smb' '/smb:motherboard/80040000000.peu0-c0' '/pci0000:00/0000:00:03.0' '/0000:05:00.2/host1/fc_host/host1', 'dev_loss_tmo': '30', 'fabric_name': '0x1000000533f55566', 'issue_lip': '', 'max_npiv_vports': '255', 'maxframe_size': '2048 bytes', 'node_name': '0x200010604b019419', 'npiv_vports_inuse': '0', 'port_id': '0x680409', 'port_name': '0x100010604b019419', 'port_state': 'Online', 'port_type': 'NPort (fabric via point-to-point)', 'speed': '10 Gbit', 'supported_classes': 'Class 3', 'supported_speeds': '10 Gbit', 'symbolic_name': 'Emulex 554M FV4.0.493.0 DV8.3.27', 'tgtid_bind_type': 'wwpn (World Wide Port Name)', 'uevent': None, 'vport_create': '', 'vport_delete': ''}] def fake_get_fc_hbas_info(self): hbas = self.fake_get_fc_hbas() info = [{'port_name': hbas[0]['port_name'].replace('0x', ''), 'node_name': hbas[0]['node_name'].replace('0x', ''), 'host_device': hbas[0]['ClassDevice'], 'device_path': hbas[0]['ClassDevicePath']}] return info def fake_get_fc_hbas_info_with_platform(self): hbas = self.fake_get_fc_hbas_with_platform() info = [{'port_name': hbas[0]['port_name'].replace('0x', ''), 'node_name': hbas[0]['node_name'].replace('0x', ''), 'host_device': hbas[0]['ClassDevice'], 'device_path': hbas[0]['ClassDevicePath']}] return info def fibrechan_connection(self, volume, location, wwn, lun=1): return {'driver_volume_type': 'fibrechan', 'data': { 'volume_id': volume['id'], 'target_portal': location, 'target_wwn': wwn, 'target_lun': lun, }} @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas') def test_get_connector_properties(self, mock_hbas): mock_hbas.return_value = self.fake_get_fc_hbas() multipath = True enforce_multipath = True props = fibre_channel.FibreChannelConnector.get_connector_properties( 'sudo', multipath=multipath, enforce_multipath=enforce_multipath) hbas = self.fake_get_fc_hbas() expected_props = {'wwpns': [hbas[0]['port_name'].replace('0x', '')], 'wwnns': [hbas[0]['node_name'].replace('0x', '')]} self.assertEqual(expected_props, props) def 
test_get_search_path(self): search_path = self.connector.get_search_path() expected = "/dev/disk/by-path" self.assertEqual(expected, search_path) def test_get_pci_num(self): hba = {'device_path': "/sys/devices/pci0000:00/0000:00:03.0" "/0000:05:00.3/host2/fc_host/host2"} platform, pci_num = self.connector._get_pci_num(hba) self.assertEqual("0000:05:00.3", pci_num) self.assertIsNone(platform) hba = {'device_path': "/sys/devices/pci0000:00/0000:00:03.0" "/0000:05:00.3/0000:06:00.6/host2/fc_host/host2"} platform, pci_num = self.connector._get_pci_num(hba) self.assertEqual("0000:06:00.6", pci_num) self.assertIsNone(platform) hba = {'device_path': "/sys/devices/pci0000:20/0000:20:03.0" "/0000:21:00.2/net/ens2f2/ctlr_2/host3" "/fc_host/host3"} platform, pci_num = self.connector._get_pci_num(hba) self.assertEqual("0000:21:00.2", pci_num) self.assertIsNone(platform) def test_get_pci_num_with_platform(self): hba = {'device_path': "/sys/devices/platform/smb/smb:motherboard/" "80040000000.peu0-c0/pci0000:00/0000:00:03.0" "/0000:05:00.3/host2/fc_host/host2"} platform, pci_num = self.connector._get_pci_num(hba) self.assertEqual("0000:05:00.3", pci_num) self.assertEqual("platform-80040000000.peu0-c0", platform) hba = {'device_path': "/sys/devices/platform/smb/smb:motherboard" "/80040000000.peu0-c0/pci0000:00/0000:00:03.0" "/0000:05:00.3/0000:06:00.6/host2/fc_host/host2"} platform, pci_num = self.connector._get_pci_num(hba) self.assertEqual("0000:06:00.6", pci_num) self.assertEqual("platform-80040000000.peu0-c0", platform) hba = {'device_path': "/sys/devices/platform/smb" "/smb:motherboard/80040000000.peu0-c0/pci0000:20" "/0000:20:03.0/0000:21:00.2" "/net/ens2f2/ctlr_2/host3/fc_host/host3"} platform, pci_num = self.connector._get_pci_num(hba) self.assertEqual("0000:21:00.2", pci_num) self.assertEqual("platform-80040000000.peu0-c0", platform) @mock.patch.object(os.path, 'exists', return_value=True) @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas_info') def test_get_volume_paths(self, fake_fc_hbas_info, fake_fc_hbas, fake_exists): fake_fc_hbas.side_effect = self.fake_get_fc_hbas fake_fc_hbas_info.side_effect = self.fake_get_fc_hbas_info name = 'volume-00000001' vol = {'id': 1, 'name': name} location = '10.0.2.15:3260' wwn = '1234567890123456' connection_info = self.fibrechan_connection(vol, location, wwn) conn_data = self.connector._add_targets_to_connection_properties( connection_info['data'] ) volume_paths = self.connector.get_volume_paths(conn_data) expected = ['/dev/disk/by-path/pci-0000:05:00.2' '-fc-0x1234567890123456-lun-1'] self.assertEqual(expected, volume_paths) @mock.patch.object(os.path, 'exists', return_value=True) @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas_info') def test_get_volume_paths_with_platform(self, fake_fc_hbas_info, fake_fc_hbas, fake_exists): fake_fc_hbas.side_effect = self.fake_get_fc_hbas_with_platform fake_fc_hbas_info.side_effect \ = self.fake_get_fc_hbas_info_with_platform name = 'volume-00000001' vol = {'id': 1, 'name': name} location = '10.0.2.15:3260' wwn = '1234567890123456' connection_info = self.fibrechan_connection(vol, location, wwn) conn_data = self.connector._add_targets_to_connection_properties( connection_info['data'] ) volume_paths = self.connector.get_volume_paths(conn_data) expected = ['/dev/disk/by-path' '/platform-80040000000.peu0-c0-pci-0000:05:00.2' '-fc-0x1234567890123456-lun-1'] self.assertEqual(expected, 
volume_paths) @mock.patch.object( base.BaseLinuxConnector, 'check_multipath', mock.MagicMock()) @mock.patch.object(linuxscsi.LinuxSCSI, 'wait_for_rw') @mock.patch.object(os.path, 'exists', return_value=True) @mock.patch.object(os.path, 'realpath', return_value='/dev/sdb') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas_info') @mock.patch.object(linuxscsi.LinuxSCSI, 'remove_scsi_device') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_scsi_wwn') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_device_info') @mock.patch.object(base.BaseLinuxConnector, 'check_valid_device') def test_connect_volume(self, check_valid_device_mock, get_device_info_mock, get_scsi_wwn_mock, remove_device_mock, get_fc_hbas_info_mock, get_fc_hbas_mock, realpath_mock, exists_mock, wait_for_rw_mock): check_valid_device_mock.return_value = True get_fc_hbas_mock.side_effect = self.fake_get_fc_hbas get_fc_hbas_info_mock.side_effect = self.fake_get_fc_hbas_info wwn = '1234567890' multipath_devname = '/dev/md-1' devices = {"device": multipath_devname, "id": wwn, "devices": [{'device': '/dev/sdb', 'address': '1:0:0:1', 'host': 1, 'channel': 0, 'id': 0, 'lun': 1}]} get_device_info_mock.return_value = devices['devices'][0] get_scsi_wwn_mock.return_value = wwn location = '10.0.2.15:3260' name = 'volume-00000001' vol = {'id': 1, 'name': name} # Should work for string, unicode, and list wwns_luns = [ ('1234567890123456', 1), (str('1234567890123456'), 1), (['1234567890123456', '1234567890123457'], 1), (['1234567890123456', '1234567890123457'], 1), ] for wwn, lun in wwns_luns: connection_info = self.fibrechan_connection(vol, location, wwn, lun) dev_info = self.connector.connect_volume(connection_info['data']) exp_wwn = wwn[0] if isinstance(wwn, list) else wwn dev_str = ('/dev/disk/by-path/pci-0000:05:00.2-fc-0x%s-lun-1' % exp_wwn) self.assertEqual(dev_info['type'], 'block') self.assertEqual(dev_info['path'], dev_str) self.assertNotIn('multipath_id', dev_info) self.assertNotIn('devices', dev_info) self.connector.disconnect_volume(connection_info['data'], dev_info) expected_commands = [] self.assertEqual(expected_commands, self.cmds) # Should not work for anything other than string, unicode, and list connection_info = self.fibrechan_connection(vol, location, 123) self.assertRaises(exception.VolumePathsNotFound, self.connector.connect_volume, connection_info['data']) get_fc_hbas_mock.side_effect = [[]] get_fc_hbas_info_mock.side_effect = [[]] self.assertRaises(exception.VolumePathsNotFound, self.connector.connect_volume, connection_info['data']) @mock.patch.object( base.BaseLinuxConnector, 'check_multipath', mock.MagicMock()) @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device_path') @mock.patch.object(linuxscsi.LinuxSCSI, 'wait_for_mpath_device') @mock.patch.object(linuxscsi.LinuxSCSI, 'find_sysfs_multipath_dm') def _test_connect_volume_multipath(self, get_device_info_mock, get_scsi_wwn_mock, get_fc_hbas_info_mock, get_fc_hbas_mock, realpath_mock, exists_mock, wait_for_rw_mock, find_mp_dev_mock, access_mode, should_wait_for_rw, sysfs_multipath_dm_mock, wait_mpath_device_mock, find_mp_device_path_mock, sysfs_dm_name=None): self.connector.use_multipath = True get_fc_hbas_mock.side_effect = self.fake_get_fc_hbas get_fc_hbas_info_mock.side_effect = self.fake_get_fc_hbas_info sysfs_multipath_dm_mock.return_value = sysfs_dm_name wwn = '1234567890' multipath_devname = '/dev/md-1' devices = {"device": multipath_devname, "id": wwn, "devices": [{'device': '/dev/sdb', 
'address': '1:0:0:1', 'host': 1, 'channel': 0, 'id': 0, 'lun': 1}, {'device': '/dev/sdc', 'address': '1:0:0:2', 'host': 1, 'channel': 0, 'id': 0, 'lun': 1}]} get_device_info_mock.side_effect = devices['devices'] get_scsi_wwn_mock.return_value = wwn location = '10.0.2.15:3260' name = 'volume-00000001' vol = {'id': 1, 'name': name} initiator_wwn = ['1234567890123456', '1234567890123457'] find_mp_device_path_mock.return_value = '/dev/mapper/mpatha' find_mp_dev_mock.return_value = {"device": "dm-3", "id": wwn, "name": "mpatha"} connection_info = self.fibrechan_connection(vol, location, initiator_wwn) connection_info['data']['access_mode'] = access_mode self.connector.connect_volume(connection_info['data']) self.assertEqual(should_wait_for_rw, wait_for_rw_mock.called) self.connector.disconnect_volume(connection_info['data'], devices['devices'][0]) expected_commands = [ 'multipath -f ' + find_mp_device_path_mock.return_value, 'tee -a /sys/block/sdb/device/delete', 'tee -a /sys/block/sdc/device/delete', ] self.assertEqual(expected_commands, self.cmds) sysfs_multipath_dm_mock.assert_called_once() if sysfs_dm_name: wait_mpath_device_mock.assert_called_once_with(sysfs_dm_name) else: wait_mpath_device_mock.assert_not_called() return connection_info @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device') @mock.patch.object(linuxscsi.LinuxSCSI, 'wait_for_rw') @mock.patch.object(os.path, 'exists', return_value=True) @mock.patch.object(os.path, 'realpath', return_value='/dev/sdb') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas_info') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_scsi_wwn') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_device_info') @mock.patch.object(base.BaseLinuxConnector, 'check_valid_device') def test_connect_volume_multipath_found_dm(self, check_valid_device_mock, get_device_info_mock, get_scsi_wwn_mock, get_fc_hbas_info_mock, get_fc_hbas_mock, realpath_mock, exists_mock, wait_for_rw_mock, find_mp_dev_mock): check_valid_device_mock.return_value = True self._test_connect_volume_multipath(get_device_info_mock, get_scsi_wwn_mock, get_fc_hbas_info_mock, get_fc_hbas_mock, realpath_mock, exists_mock, wait_for_rw_mock, find_mp_dev_mock, 'rw', True, sysfs_dm_name='dm-18') @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device') @mock.patch.object(linuxscsi.LinuxSCSI, 'wait_for_rw') @mock.patch.object(os.path, 'exists', return_value=True) @mock.patch.object(os.path, 'realpath', return_value='/dev/sdb') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas_info') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_scsi_wwn') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_device_info') @mock.patch.object(base.BaseLinuxConnector, 'check_valid_device') def test_connect_volume_multipath_rw(self, check_valid_device_mock, get_device_info_mock, get_scsi_wwn_mock, get_fc_hbas_info_mock, get_fc_hbas_mock, realpath_mock, exists_mock, wait_for_rw_mock, find_mp_dev_mock): check_valid_device_mock.return_value = True self._test_connect_volume_multipath(get_device_info_mock, get_scsi_wwn_mock, get_fc_hbas_info_mock, get_fc_hbas_mock, realpath_mock, exists_mock, wait_for_rw_mock, find_mp_dev_mock, 'rw', True) @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device') @mock.patch.object(linuxscsi.LinuxSCSI, 'wait_for_rw') @mock.patch.object(os.path, 'exists', return_value=True) @mock.patch.object(os.path, 'realpath', return_value='/dev/sdb') 
@mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas_info') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_scsi_wwn') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_device_info') @mock.patch.object(base.BaseLinuxConnector, 'check_valid_device') def test_connect_volume_multipath_no_access_mode(self, check_valid_device_mock, get_device_info_mock, get_scsi_wwn_mock, get_fc_hbas_info_mock, get_fc_hbas_mock, realpath_mock, exists_mock, wait_for_rw_mock, find_mp_dev_mock): check_valid_device_mock.return_value = True self._test_connect_volume_multipath(get_device_info_mock, get_scsi_wwn_mock, get_fc_hbas_info_mock, get_fc_hbas_mock, realpath_mock, exists_mock, wait_for_rw_mock, find_mp_dev_mock, None, True) @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device') @mock.patch.object(linuxscsi.LinuxSCSI, 'wait_for_rw') @mock.patch.object(os.path, 'exists', return_value=True) @mock.patch.object(os.path, 'realpath', return_value='/dev/sdb') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas_info') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_scsi_wwn') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_device_info') @mock.patch.object(base.BaseLinuxConnector, 'check_valid_device') def test_connect_volume_multipath_ro(self, check_valid_device_mock, get_device_info_mock, get_scsi_wwn_mock, get_fc_hbas_info_mock, get_fc_hbas_mock, realpath_mock, exists_mock, wait_for_rw_mock, find_mp_dev_mock): check_valid_device_mock.return_value = True self._test_connect_volume_multipath(get_device_info_mock, get_scsi_wwn_mock, get_fc_hbas_info_mock, get_fc_hbas_mock, realpath_mock, exists_mock, wait_for_rw_mock, find_mp_dev_mock, 'ro', False) @mock.patch.object(base.BaseLinuxConnector, '_discover_mpath_device') @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device') @mock.patch.object(linuxscsi.LinuxSCSI, 'wait_for_rw') @mock.patch.object(os.path, 'exists', return_value=True) @mock.patch.object(os.path, 'realpath', return_value='/dev/sdb') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas_info') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_scsi_wwn') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_device_info') @mock.patch.object(base.BaseLinuxConnector, 'check_valid_device') def test_connect_volume_multipath_not_found(self, check_valid_device_mock, get_device_info_mock, get_scsi_wwn_mock, get_fc_hbas_info_mock, get_fc_hbas_mock, realpath_mock, exists_mock, wait_for_rw_mock, find_mp_dev_mock, discover_mp_dev_mock): check_valid_device_mock.return_value = True discover_mp_dev_mock.return_value = ("/dev/disk/by-path/something", None) connection_info = self._test_connect_volume_multipath( get_device_info_mock, get_scsi_wwn_mock, get_fc_hbas_info_mock, get_fc_hbas_mock, realpath_mock, exists_mock, wait_for_rw_mock, find_mp_dev_mock, 'rw', False) self.assertNotIn('multipathd_id', connection_info['data']) # Ensure we don't call it with the real path device_name = discover_mp_dev_mock.call_args[0][-1] self.assertNotEqual(realpath_mock.return_value, device_name) @mock.patch.object(fibre_channel.FibreChannelConnector, 'get_volume_paths') def test_extend_volume_no_path(self, mock_volume_paths): mock_volume_paths.return_value = [] volume = {'id': 'fake_uuid'} wwn = '1234567890123456' connection_info = self.fibrechan_connection(volume, "10.0.2.15:3260", wwn) self.assertRaises(exception.VolumePathsNotFound, 
self.connector.extend_volume, connection_info['data']) @mock.patch.object(linuxscsi.LinuxSCSI, 'extend_volume') @mock.patch.object(fibre_channel.FibreChannelConnector, 'get_volume_paths') def test_extend_volume(self, mock_volume_paths, mock_scsi_extend): fake_new_size = 1024 mock_volume_paths.return_value = ['/dev/vdx'] mock_scsi_extend.return_value = fake_new_size volume = {'id': 'fake_uuid'} wwn = '1234567890123456' connection_info = self.fibrechan_connection(volume, "10.0.2.15:3260", wwn) new_size = self.connector.extend_volume(connection_info['data']) self.assertEqual(fake_new_size, new_size) @mock.patch.object(os.path, 'isdir') def test_get_all_available_volumes_path_not_dir(self, mock_isdir): mock_isdir.return_value = False expected = [] actual = self.connector.get_all_available_volumes() self.assertCountEqual(expected, actual) @mock.patch('eventlet.greenthread.sleep', mock.Mock()) @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device') @mock.patch.object(linuxscsi.LinuxSCSI, 'wait_for_rw') @mock.patch.object(os.path, 'exists', return_value=True) @mock.patch.object(os.path, 'realpath', return_value='/dev/sdb') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas_info') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_scsi_wwn') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_device_info') @mock.patch.object(base.BaseLinuxConnector, 'check_valid_device') def test_connect_volume_device_not_valid(self, check_valid_device_mock, get_device_info_mock, get_scsi_wwn_mock, get_fc_hbas_info_mock, get_fc_hbas_mock, realpath_mock, exists_mock, wait_for_rw_mock, find_mp_dev_mock): check_valid_device_mock.return_value = False self.assertRaises(exception.NoFibreChannelVolumeDeviceFound, self._test_connect_volume_multipath, get_device_info_mock, get_scsi_wwn_mock, get_fc_hbas_info_mock, get_fc_hbas_mock, realpath_mock, exists_mock, wait_for_rw_mock, find_mp_dev_mock, 'rw', True) @ddt.data( { "target_info": { "target_lun": 1, "target_wwn": '1234567890123456', }, "expected_targets": [ ('1234567890123456', 1) ] }, { "target_info": { "target_lun": 1, "target_wwn": ['1234567890123456', '1234567890123457'], }, "expected_targets": [ ('1234567890123456', 1), ('1234567890123457', 1), ] }, { "target_info": { "target_luns": [1, 1], "target_wwn": ['1234567890123456', '1234567890123457'], }, "expected_targets": [ ('1234567890123456', 1), ('1234567890123457', 1), ] }, { "target_info": { "target_luns": [1, 2], "target_wwn": ['1234567890123456', '1234567890123457'], }, "expected_targets": [ ('1234567890123456', 1), ('1234567890123457', 2), ] }, { "target_info": { "target_luns": [1, 1], "target_wwns": ['1234567890123456', '1234567890123457'], }, "expected_targets": [ ('1234567890123456', 1), ('1234567890123457', 1), ] }, { "target_info": { "target_lun": 7, "target_luns": [1, 1], "target_wwn": 'foo', "target_wwns": ['1234567890123456', '1234567890123457'], }, "expected_targets": [ ('1234567890123456', 1), ('1234567890123457', 1), ] }, # Add the zone map in now { "target_info": { "target_lun": 1, "target_wwn": '1234567890123456', }, "expected_targets": [ ('1234567890123456', 1) ], "itmap": { '0004567890123456': ['1234567890123456'] }, "expected_map": { '0004567890123456': [('1234567890123456', 1)] } }, { "target_info": { "target_lun": 1, "target_wwn": ['1234567890123456', '1234567890123457'], }, "expected_targets": [ ('1234567890123456', 1), ('1234567890123457', 1), ], "itmap": { '0004567890123456': ['1234567890123456', '1234567890123457'] }, 
"expected_map": { '0004567890123456': [('1234567890123456', 1), ('1234567890123457', 1)] } }, { "target_info": { "target_luns": [1, 2], "target_wwn": ['1234567890123456', '1234567890123457'], }, "expected_targets": [ ('1234567890123456', 1), ('1234567890123457', 2), ], "itmap": { '0004567890123456': ['1234567890123456'], '1004567890123456': ['1234567890123457'], }, "expected_map": { '0004567890123456': [('1234567890123456', 1)], '1004567890123456': [('1234567890123457', 2)], } }, { "target_info": { "target_luns": [1, 2], "target_wwn": ['1234567890123456', '1234567890123457'], }, "expected_targets": [ ('1234567890123456', 1), ('1234567890123457', 2), ], "itmap": { '0004567890123456': ['1234567890123456', '1234567890123457'] }, "expected_map": { '0004567890123456': [('1234567890123456', 1), ('1234567890123457', 2)] } }, { "target_info": { "target_lun": 1, "target_wwn": ['20320002AC01E166', '21420002AC01E166', '20410002AC01E166', '21410002AC01E166'] }, "expected_targets": [ ('20320002ac01e166', 1), ('21420002ac01e166', 1), ('20410002ac01e166', 1), ('21410002ac01e166', 1) ], "itmap": { '10001409DCD71FF6': ['20320002AC01E166', '21420002AC01E166'], '10001409DCD71FF7': ['20410002AC01E166', '21410002AC01E166'] }, "expected_map": { '10001409dcd71ff6': [('20320002ac01e166', 1), ('21420002ac01e166', 1)], '10001409dcd71ff7': [('20410002ac01e166', 1), ('21410002ac01e166', 1)] } }, ) @ddt.unpack def test__add_targets_to_connection_properties(self, target_info, expected_targets, itmap=None, expected_map=None): volume = {'id': 'fake_uuid'} wwn = '1234567890123456' conn = self.fibrechan_connection(volume, "10.0.2.15:3260", wwn) conn['data'].update(target_info) conn['data']['initiator_target_map'] = itmap connection_info = self.connector._add_targets_to_connection_properties( conn['data']) self.assertIn('targets', connection_info) self.assertEqual(expected_targets, connection_info['targets']) # Check that we turn to lowercase target wwns key = 'target_wwns' if 'target_wwns' in target_info else 'target_wwn' wwns = target_info.get(key) wwns = [wwns] if isinstance(wwns, str) else wwns wwns = [w.lower() for w in wwns] if wwns: self.assertEqual(wwns, conn['data'][key]) if itmap: self.assertIn('initiator_target_lun_map', connection_info) self.assertEqual(expected_map, connection_info['initiator_target_lun_map']) @ddt.data(('/dev/mapper/', True), ('/dev/mapper/mpath0', True), # Check real devices are properly detected as non multipaths ('/dev/sda', False), ('/dev/disk/by-path/pci-1-fc-1-lun-1', False)) @ddt.unpack @mock.patch('os_brick.initiator.linuxscsi.LinuxSCSI.remove_scsi_device') @mock.patch('os_brick.initiator.linuxscsi.LinuxSCSI.requires_flush') @mock.patch('os_brick.utils.get_dev_path') def test__remove_devices(self, path_used, was_multipath, get_dev_path_mock, flush_mock, remove_mock): exc = exception.ExceptionChainer() get_dev_path_mock.return_value = path_used self.connector._remove_devices(mock.sentinel.con_props, [{'device': '/dev/sda'}], mock.sentinel.device_info, force=False, exc=exc) self.assertFalse(bool(exc)) get_dev_path_mock.assert_called_once_with(mock.sentinel.con_props, mock.sentinel.device_info) flush_mock.assert_called_once_with('/dev/sda', path_used, was_multipath) remove_mock.assert_called_once_with('/dev/sda', flush=flush_mock.return_value) @ddt.data(('/dev/mapper/', True), ('/dev/mapper/mpath0', True), # Check real devices are properly detected as non multipaths ('/dev/sda', False), ('/dev/disk/by-path/pci-1-fc-1-lun-1', False)) @ddt.unpack 
@mock.patch('os_brick.initiator.linuxscsi.LinuxSCSI.remove_scsi_device') @mock.patch('os_brick.initiator.linuxscsi.LinuxSCSI.requires_flush') @mock.patch('os_brick.utils.get_dev_path') def test__remove_devices_fails(self, path_used, was_multipath, get_dev_path_mock, flush_mock, remove_mock): exc = exception.ExceptionChainer() get_dev_path_mock.return_value = path_used remove_mock.side_effect = Exception self.connector._remove_devices(mock.sentinel.con_props, [{'device': '/dev/sda'}, {'device': '/dev/sdb'}], mock.sentinel.device_info, force=True, exc=exc) self.assertTrue(bool(exc)) get_dev_path_mock.assert_called_once_with(mock.sentinel.con_props, mock.sentinel.device_info) expect_flush = [mock.call('/dev/sda', path_used, was_multipath), mock.call('/dev/sdb', path_used, was_multipath)] self.assertEqual(len(expect_flush), flush_mock.call_count) flush_mock.assert_has_calls(expect_flush) expect_remove = [mock.call('/dev/sda', flush=flush_mock.return_value), mock.call('/dev/sdb', flush=flush_mock.return_value)] self.assertEqual(len(expect_remove), remove_mock.call_count) remove_mock.assert_has_calls(expect_remove) @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device') @mock.patch.object(linuxscsi.LinuxSCSI, 'wait_for_rw') @mock.patch.object(os.path, 'exists', return_value=True) @mock.patch.object(os.path, 'realpath', return_value='/dev/sdb') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas_info') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_scsi_wwn') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_device_info') @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device_path') @mock.patch.object(base.BaseLinuxConnector, 'check_valid_device') def test_disconnect_volume(self, check_valid_device_mock, find_mp_device_path_mock, get_device_info_mock, get_scsi_wwn_mock, get_fc_hbas_info_mock, get_fc_hbas_mock, realpath_mock, exists_mock, wait_for_rw_mock, find_mp_dev_mock): check_valid_device_mock.return_value = True self.connector.use_multipath = True get_fc_hbas_mock.side_effect = self.fake_get_fc_hbas get_fc_hbas_info_mock.side_effect = self.fake_get_fc_hbas_info wwn = '1234567890' multipath_devname = '/dev/md-1' devices = {"device": multipath_devname, "id": wwn, "devices": [{'device': '/dev/sdb', 'address': '1:0:0:1', 'host': 1, 'channel': 0, 'id': 0, 'lun': 1}, {'device': '/dev/sdc', 'address': '1:0:0:2', 'host': 1, 'channel': 0, 'id': 0, 'lun': 1}]} get_device_info_mock.side_effect = devices['devices'] get_scsi_wwn_mock.return_value = wwn location = '10.0.2.15:3260' name = 'volume-00000001' vol = {'id': 1, 'name': name} initiator_wwn = ['1234567890123456', '1234567890123457'] find_mp_device_path_mock.return_value = '/dev/mapper/mpatha' find_mp_dev_mock.return_value = {"device": "dm-3", "id": wwn, "name": "mpatha"} connection_info = self.fibrechan_connection(vol, location, initiator_wwn) self.connector.disconnect_volume(connection_info['data'], devices['devices'][0]) expected_commands = [ 'multipath -f ' + find_mp_device_path_mock.return_value, 'tee -a /sys/block/sdb/device/delete', 'tee -a /sys/block/sdc/device/delete', ] self.assertEqual(expected_commands, self.cmds) @ddt.data((False, Exception), (True, Exception), (False, None)) @ddt.unpack @mock.patch.object(fibre_channel.FibreChannelConnector, '_remove_devices') @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_del_map') @mock.patch.object(linuxscsi.LinuxSCSI, 'flush_multipath_device') @mock.patch.object(linuxscsi.LinuxSCSI, 'wait_for_rw') 
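    # The (ignore_exc, side_effect) tuples in @ddt.data above drive this
    # test: a flush/del_map failure raises an ExceptionChainer unless
    # ignore_errors is set.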
@mock.patch.object(os.path, 'exists', return_value=True) @mock.patch.object(os.path, 'realpath') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas_info') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_scsi_wwn') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_device_info') @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device_path') @mock.patch.object(base.BaseLinuxConnector, 'check_valid_device') def test_disconnect_volume_fails(self, ignore_exc, side_effect, check_valid_device_mock, find_mp_device_path_mock, get_device_info_mock, get_scsi_wwn_mock, get_fc_hbas_info_mock, get_fc_hbas_mock, realpath_mock, exists_mock, wait_for_rw_mock, flush_mock, del_map_mock, remove_mock): flush_mock.side_effect = side_effect del_map_mock.side_effect = side_effect check_valid_device_mock.return_value = True self.connector.use_multipath = True get_fc_hbas_mock.side_effect = self.fake_get_fc_hbas get_fc_hbas_info_mock.side_effect = self.fake_get_fc_hbas_info wwn = '360002ac00000000000000b860000741c' multipath_devname = f'/dev/disk/by-id/dm-uuid-mpath-{wwn}' realpath_mock.return_value = '/dev/dm-1' devices = {"device": multipath_devname, "id": wwn, "devices": [{'device': '/dev/sdb', 'address': '1:0:0:1', 'host': 1, 'channel': 0, 'id': 0, 'lun': 1}, {'device': '/dev/sdc', 'address': '1:0:0:2', 'host': 1, 'channel': 0, 'id': 0, 'lun': 1}]} get_device_info_mock.side_effect = devices['devices'] get_scsi_wwn_mock.return_value = wwn location = '10.0.2.15:3260' name = 'volume-00000001' vol = {'id': 1, 'name': name} initiator_wwn = ['1234567890123456', '1234567890123457'] find_mp_device_path_mock.return_value = '/dev/mapper/mpatha' connection_info = self.fibrechan_connection(vol, location, initiator_wwn) if side_effect and not ignore_exc: self.assertRaises(exception.ExceptionChainer, self.connector.disconnect_volume, connection_info['data'], devices['devices'][0], force=True, ignore_errors=ignore_exc) else: self.connector.disconnect_volume(connection_info['data'], devices['devices'][0], force=True, ignore_errors=ignore_exc) flush_mock.assert_called_once_with( find_mp_device_path_mock.return_value) expected = [ mock.call(f'/dev/disk/by-path/pci-0000:05:00.2-fc-0x{wwn}-lun-1') for wwn in initiator_wwn] if side_effect: del_map_mock.assert_called_once_with('dm-1') expected.append(mock.call(find_mp_device_path_mock.return_value)) else: del_map_mock.assert_not_called() remove_mock.assert_called_once_with(connection_info['data'], devices['devices'], devices['devices'][0], True, mock.ANY) self.assertEqual(len(expected), realpath_mock.call_count) realpath_mock.assert_has_calls(expected) @ddt.data(None, mock.sentinel.addressing_mode) @mock.patch.object(fibre_channel.FibreChannelConnector, '_get_host_devices') @mock.patch.object(fibre_channel.FibreChannelConnector, '_get_possible_devices') def test__get_possible_volume_paths(self, addressing_mode, pos_devs_mock, host_devs_mock): conn_props = {'targets': mock.sentinel.targets} if addressing_mode: conn_props['addressing_mode'] = addressing_mode res = self.connector._get_possible_volume_paths(conn_props, mock.sentinel.hbas) pos_devs_mock.assert_called_once_with(mock.sentinel.hbas, mock.sentinel.targets, addressing_mode) host_devs_mock.assert_called_once_with(pos_devs_mock.return_value) self.assertEqual(host_devs_mock.return_value, res) @mock.patch.object(linuxscsi.LinuxSCSI, 'lun_for_addressing') @mock.patch.object(fibre_channel.FibreChannelConnector, '_get_pci_num') def test__get_possible_devices(self, 
pci_mock, lun_mock): pci_mock.side_effect = [ (mock.sentinel.platform1, mock.sentinel.pci_num1), (mock.sentinel.platform2, mock.sentinel.pci_num2)] hbas = [mock.sentinel.hba1, mock.sentinel.hba2] lun_mock.side_effect = [mock.sentinel.lun1B, mock.sentinel.lun2B] * 2 targets = [('wwn1', mock.sentinel.lun1), ('wwn2', mock.sentinel.lun2)] res = self.connector._get_possible_devices( hbas, targets, mock.sentinel.addressing_mode) self.assertEqual(2, pci_mock.call_count) pci_mock.assert_has_calls([mock.call(mock.sentinel.hba1), mock.call(mock.sentinel.hba2)]) self.assertEqual(4, lun_mock.call_count) lun_mock.assert_has_calls([ mock.call(mock.sentinel.lun1, mock.sentinel.addressing_mode), mock.call(mock.sentinel.lun2, mock.sentinel.addressing_mode)] * 2) expected = [ (mock.sentinel.platform1, mock.sentinel.pci_num1, '0xwwn1', mock.sentinel.lun1B), (mock.sentinel.platform1, mock.sentinel.pci_num1, '0xwwn2', mock.sentinel.lun2B), (mock.sentinel.platform2, mock.sentinel.pci_num2, '0xwwn1', mock.sentinel.lun1B), (mock.sentinel.platform2, mock.sentinel.pci_num2, '0xwwn2', mock.sentinel.lun2B), ] self.assertEqual(expected, res) @mock.patch.object(linuxscsi.LinuxSCSI, 'is_multipath_running') def test_supports_multipath(self, mock_mpath_running): self.connector.supports_multipath() mock_mpath_running.assert_called_once_with( root_helper=self.connector._root_helper) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/initiator/connectors/test_fibre_channel_ppc64.py0000664000175000017500000000442300000000000027740 0ustar00zuulzuul00000000000000# (c) Copyright 2013 IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
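# Illustrative sketch (an assumption that mirrors the expectations in
# test_get_host_devices below): on ppc64 the by-path entries carry no PCI
# component, and duplicate (wwpn, lun) pairs collapse to a single path:
def _ppc64_host_devices_example(possible_devs, lun):
    return {'/dev/disk/by-path/fc-%s-lun-%s' % (wwpn, lun)
            for _pci_num, wwpn in possible_devs}
# _ppc64_host_devices_example([(3, '0x5005076802232ade')], 2) ==
#     {'/dev/disk/by-path/fc-0x5005076802232ade-lun-2'}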
from unittest import mock from os_brick.initiator.connectors import fibre_channel_ppc64 from os_brick.initiator import linuxscsi from os_brick.tests.initiator import test_connector class FibreChannelConnectorPPC64TestCase(test_connector.ConnectorTestCase): def setUp(self): super(FibreChannelConnectorPPC64TestCase, self).setUp() self.connector = fibre_channel_ppc64.FibreChannelConnectorPPC64( None, execute=self.fake_execute, use_multipath=False) self.assertIsNotNone(self.connector) self.assertIsNotNone(self.connector._linuxfc) self.assertEqual(self.connector._linuxfc.__class__.__name__, "LinuxFibreChannel") self.assertIsNotNone(self.connector._linuxscsi) @mock.patch.object(linuxscsi.LinuxSCSI, 'process_lun_id', return_value='2') def test_get_host_devices(self, mock_process_lun_id): lun = 2 possible_devs = [(3, "0x5005076802232ade"), (3, "0x5005076802332ade"), ] devices = self.connector._get_host_devices(possible_devs, lun) self.assertEqual(2, len(devices)) device_path = "/dev/disk/by-path/fc-0x5005076802332ade-lun-2" self.assertIn(device_path, devices) device_path = "/dev/disk/by-path/fc-0x5005076802232ade-lun-2" self.assertIn(device_path, devices) # test duplicates possible_devs = [(3, "0x5005076802232ade"), (3, "0x5005076802232ade"), ] devices = self.connector._get_host_devices(possible_devs, lun) self.assertEqual(1, len(devices)) device_path = "/dev/disk/by-path/fc-0x5005076802232ade-lun-2" self.assertIn(device_path, devices) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/initiator/connectors/test_fibre_channel_s390x.py0000664000175000017500000001165400000000000027676 0ustar00zuulzuul00000000000000# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
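# Illustrative sketch (an assumption derived from the vectors checked in
# test_get_lun_string below): integer LUNs are encoded as 64-bit zFCP LUN
# strings, zero-padded on the right:
def _zfcp_lun_string_example(lun):
    if lun <= 0xffff:
        return '0x%04x000000000000' % lun
    return '0x%08x00000000' % lun
# _zfcp_lun_string_example(0xff)       == '0x00ff000000000000'
# _zfcp_lun_string_example(0x4020400a) == '0x4020400a00000000'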
from unittest import mock from os_brick import exception from os_brick.initiator.connectors import fibre_channel_s390x from os_brick.initiator import linuxfc from os_brick.tests.initiator import test_connector class FibreChannelConnectorS390XTestCase(test_connector.ConnectorTestCase): def setUp(self): super(FibreChannelConnectorS390XTestCase, self).setUp() self.connector = fibre_channel_s390x.FibreChannelConnectorS390X( None, execute=self.fake_execute, use_multipath=False) self.assertIsNotNone(self.connector) self.assertIsNotNone(self.connector._linuxfc) self.assertEqual(self.connector._linuxfc.__class__.__name__, "LinuxFibreChannelS390X") self.assertIsNotNone(self.connector._linuxscsi) @mock.patch.object(linuxfc.LinuxFibreChannelS390X, 'configure_scsi_device') def test_get_host_devices(self, mock_configure_scsi_device): possible_devs = [(3, 5, 2), ] devices = self.connector._get_host_devices(possible_devs) mock_configure_scsi_device.assert_called_with(3, 5, "0x0002000000000000") self.assertEqual(3, len(devices)) device_path = "/dev/disk/by-path/ccw-3-zfcp-5:0x0002000000000000" self.assertEqual(devices[0], device_path) device_path = "/dev/disk/by-path/ccw-3-fc-5-lun-2" self.assertEqual(devices[1], device_path) device_path = "/dev/disk/by-path/ccw-3-fc-5-lun-0x0002000000000000" self.assertEqual(devices[2], device_path) def test_get_lun_string(self): lun = 1 lunstring = self.connector._get_lun_string(lun) self.assertEqual(lunstring, "0x0001000000000000") lun = 0xff lunstring = self.connector._get_lun_string(lun) self.assertEqual(lunstring, "0x00ff000000000000") lun = 0x101 lunstring = self.connector._get_lun_string(lun) self.assertEqual(lunstring, "0x0101000000000000") lun = 0x4020400a lunstring = self.connector._get_lun_string(lun) self.assertEqual(lunstring, "0x4020400a00000000") @mock.patch.object(fibre_channel_s390x.FibreChannelConnectorS390X, '_get_possible_devices', return_value=[('', 3, 5, 2), ]) @mock.patch.object(linuxfc.LinuxFibreChannelS390X, 'get_fc_hbas_info', return_value=[]) @mock.patch.object(linuxfc.LinuxFibreChannelS390X, 'deconfigure_scsi_device') def test_remove_devices(self, mock_deconfigure_scsi_device, mock_get_fc_hbas_info, mock_get_possible_devices): exc = exception.ExceptionChainer() connection_properties = {'targets': [5, 2]} self.connector._remove_devices(connection_properties, devices=None, device_info=None, force=False, exc=exc) mock_deconfigure_scsi_device.assert_called_with(3, 5, "0x0002000000000000") mock_get_fc_hbas_info.assert_called_once_with() mock_get_possible_devices.assert_called_once_with([], [5, 2], None) self.assertFalse(bool(exc)) @mock.patch.object(fibre_channel_s390x.FibreChannelConnectorS390X, '_get_possible_devices', return_value=[('', 3, 5, 2), ]) @mock.patch.object(linuxfc.LinuxFibreChannelS390X, 'get_fc_hbas_info', return_value=[]) @mock.patch.object(linuxfc.LinuxFibreChannelS390X, 'deconfigure_scsi_device') def test_remove_devices_force(self, mock_deconfigure_scsi_device, mock_get_fc_hbas_info, mock_get_possible_devices): exc = exception.ExceptionChainer() mock_deconfigure_scsi_device.side_effect = Exception connection_properties = {'targets': [5, 2]} self.connector._remove_devices(connection_properties, devices=None, device_info=None, force=True, exc=exc) mock_deconfigure_scsi_device.assert_called_with(3, 5, "0x0002000000000000") mock_get_fc_hbas_info.assert_called_once_with() mock_get_possible_devices.assert_called_once_with([], [5, 2], None) self.assertTrue(bool(exc)) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/initiator/connectors/test_gpfs.py0000664000175000017500000000272500000000000025107 0ustar00zuulzuul00000000000000# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from os_brick.initiator.connectors import gpfs from os_brick.tests.initiator.connectors import test_local class GPFSConnectorTestCase(test_local.LocalConnectorTestCase): def setUp(self): super(GPFSConnectorTestCase, self).setUp() self.connection_properties = {'name': 'foo', 'device_path': '/tmp/bar'} self.connector = gpfs.GPFSConnector(None) def test_connect_volume(self): cprops = self.connection_properties dev_info = self.connector.connect_volume(cprops) self.assertEqual(dev_info['type'], 'gpfs') self.assertEqual(dev_info['path'], cprops['device_path']) def test_connect_volume_with_invalid_connection_data(self): cprops = {} self.assertRaises(ValueError, self.connector.connect_volume, cprops) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/initiator/connectors/test_huawei.py0000664000175000017500000002444700000000000025437 0ustar00zuulzuul00000000000000# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
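# Illustrative sketch (an assumption consistent with test__analyze_output
# below): the fake sdscli output is newline-separated "key=value" text, so
# parsing reduces to splitting each line on its first '=':
def _parse_cli_output_example(cliout):
    return dict(line.split('=', 1)
                for line in cliout.strip().splitlines() if '=' in line)
# _parse_cli_output_example('ret_code=0\ndev_addr=/dev/vdxxx')
#     == {'ret_code': '0', 'dev_addr': '/dev/vdxxx'}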
import os import tempfile from unittest import mock from os_brick import exception from os_brick.initiator.connectors import huawei from os_brick.tests.initiator import test_connector class HuaweiStorHyperConnectorTestCase(test_connector.ConnectorTestCase): """Test cases for StorHyper initiator class.""" attached = False def setUp(self): super(HuaweiStorHyperConnectorTestCase, self).setUp() self.fake_sdscli_file = tempfile.mktemp() self.addCleanup(os.remove, self.fake_sdscli_file) newefile = open(self.fake_sdscli_file, 'w') newefile.write('test') newefile.close() self.connector = huawei.HuaweiStorHyperConnector( None, execute=self.fake_execute) self.connector.cli_path = self.fake_sdscli_file self.connector.iscliexist = True self.connector_fail = huawei.HuaweiStorHyperConnector( None, execute=self.fake_execute_fail) self.connector_fail.cli_path = self.fake_sdscli_file self.connector_fail.iscliexist = True self.connector_nocli = huawei.HuaweiStorHyperConnector( None, execute=self.fake_execute_fail) self.connector_nocli.cli_path = self.fake_sdscli_file self.connector_nocli.iscliexist = False self.connection_properties = { 'access_mode': 'rw', 'qos_specs': None, 'volume_id': 'volume-b2911673-863c-4380-a5f2-e1729eecfe3f' } self.device_info = {'type': 'block', 'path': '/dev/vdxxx'} HuaweiStorHyperConnectorTestCase.attached = False def fake_execute(self, *cmd, **kwargs): method = cmd[2] self.cmds.append(" ".join(cmd)) if 'attach' == method: HuaweiStorHyperConnectorTestCase.attached = True return 'ret_code=0', None if 'querydev' == method: if HuaweiStorHyperConnectorTestCase.attached: return 'ret_code=0\ndev_addr=/dev/vdxxx', None else: return 'ret_code=1\ndev_addr=/dev/vdxxx', None if 'detach' == method: HuaweiStorHyperConnectorTestCase.attached = False return 'ret_code=0', None def fake_execute_fail(self, *cmd, **kwargs): method = cmd[2] self.cmds.append(" ".join(cmd)) if 'attach' == method: HuaweiStorHyperConnectorTestCase.attached = False return 'ret_code=330151401', None if 'querydev' == method: if HuaweiStorHyperConnectorTestCase.attached: return 'ret_code=0\ndev_addr=/dev/vdxxx', None else: return 'ret_code=1\ndev_addr=/dev/vdxxx', None if 'detach' == method: HuaweiStorHyperConnectorTestCase.attached = True return 'ret_code=330155007', None def test_get_connector_properties(self): props = huawei.HuaweiStorHyperConnector.get_connector_properties( 'sudo', multipath=True, enforce_multipath=True) expected_props = {} self.assertEqual(expected_props, props) def test_get_search_path(self): actual = self.connector.get_search_path() self.assertIsNone(actual) @mock.patch.object(huawei.HuaweiStorHyperConnector, '_query_attached_volume') def test_get_volume_paths(self, mock_query_attached): path = self.device_info['path'] mock_query_attached.return_value = {'ret_code': 0, 'dev_addr': path} expected = [path] actual = self.connector.get_volume_paths(self.connection_properties) self.assertEqual(expected, actual) def test_connect_volume(self): """Test the basic connect volume case.""" retval = self.connector.connect_volume(self.connection_properties) self.assertEqual(self.device_info, retval) expected_commands = [self.fake_sdscli_file + ' -c attach' ' -v volume-b2911673-863c-4380-a5f2-e1729eecfe3f', self.fake_sdscli_file + ' -c querydev' ' -v volume-b2911673-863c-4380-a5f2-e1729eecfe3f'] self.assertEqual(expected_commands, self.cmds) def test_disconnect_volume(self): """Test the basic disconnect volume case.""" self.connector.connect_volume(self.connection_properties) self.assertEqual(True, 
        self.assertEqual(True,
                         HuaweiStorHyperConnectorTestCase.attached)
        self.connector.disconnect_volume(self.connection_properties,
                                         self.device_info)
        self.assertEqual(False,
                         HuaweiStorHyperConnectorTestCase.attached)

        expected_commands = [self.fake_sdscli_file + ' -c attach'
                             ' -v volume-b2911673-863c-4380-a5f2-e1729eecfe3f',
                             self.fake_sdscli_file + ' -c querydev'
                             ' -v volume-b2911673-863c-4380-a5f2-e1729eecfe3f',
                             self.fake_sdscli_file + ' -c detach'
                             ' -v volume-b2911673-863c-4380-a5f2-e1729eecfe3f']
        self.assertEqual(expected_commands, self.cmds)

    def test_is_volume_connected(self):
        """Test if volume connected to host case."""
        self.connector.connect_volume(self.connection_properties)
        self.assertEqual(True,
                         HuaweiStorHyperConnectorTestCase.attached)
        is_connected = self.connector.is_volume_connected(
            'volume-b2911673-863c-4380-a5f2-e1729eecfe3f')
        self.assertEqual(HuaweiStorHyperConnectorTestCase.attached,
                         is_connected)
        self.connector.disconnect_volume(self.connection_properties,
                                         self.device_info)
        self.assertEqual(False,
                         HuaweiStorHyperConnectorTestCase.attached)
        is_connected = self.connector.is_volume_connected(
            'volume-b2911673-863c-4380-a5f2-e1729eecfe3f')
        self.assertEqual(HuaweiStorHyperConnectorTestCase.attached,
                         is_connected)

        expected_commands = [self.fake_sdscli_file + ' -c attach'
                             ' -v volume-b2911673-863c-4380-a5f2-e1729eecfe3f',
                             self.fake_sdscli_file + ' -c querydev'
                             ' -v volume-b2911673-863c-4380-a5f2-e1729eecfe3f',
                             self.fake_sdscli_file + ' -c querydev'
                             ' -v volume-b2911673-863c-4380-a5f2-e1729eecfe3f',
                             self.fake_sdscli_file + ' -c detach'
                             ' -v volume-b2911673-863c-4380-a5f2-e1729eecfe3f',
                             self.fake_sdscli_file + ' -c querydev'
                             ' -v volume-b2911673-863c-4380-a5f2-e1729eecfe3f']
        self.assertEqual(expected_commands, self.cmds)

    def test__analyze_output(self):
        cliout = 'ret_code=0\ndev_addr=/dev/vdxxx\nret_desc="success"'
        analyze_result = {'dev_addr': '/dev/vdxxx',
                          'ret_desc': '"success"',
                          'ret_code': '0'}
        result = self.connector._analyze_output(cliout)
        self.assertEqual(analyze_result, result)

    def test_connect_volume_fail(self):
        """Test the fail connect volume case."""
        self.assertRaises(exception.BrickException,
                          self.connector_fail.connect_volume,
                          self.connection_properties)
        expected_commands = [self.fake_sdscli_file + ' -c attach'
                             ' -v volume-b2911673-863c-4380-a5f2-e1729eecfe3f']
        self.assertEqual(expected_commands, self.cmds)

    def test_disconnect_volume_fail(self):
        """Test the fail disconnect volume case."""
        self.connector.connect_volume(self.connection_properties)
        self.assertEqual(True,
                         HuaweiStorHyperConnectorTestCase.attached)
        self.assertRaises(exception.BrickException,
                          self.connector_fail.disconnect_volume,
                          self.connection_properties,
                          self.device_info)

        expected_commands = [self.fake_sdscli_file + ' -c attach'
                             ' -v volume-b2911673-863c-4380-a5f2-e1729eecfe3f',
                             self.fake_sdscli_file + ' -c querydev'
                             ' -v volume-b2911673-863c-4380-a5f2-e1729eecfe3f',
                             self.fake_sdscli_file + ' -c detach'
                             ' -v volume-b2911673-863c-4380-a5f2-e1729eecfe3f']
        self.assertEqual(expected_commands, self.cmds)

    def test_connect_volume_nocli(self):
        """Test the fail connect volume case."""
        self.assertRaises(exception.BrickException,
                          self.connector_nocli.connect_volume,
                          self.connection_properties)

    def test_disconnect_volume_nocli(self):
        """Test the fail disconnect volume case."""
        self.connector.connect_volume(self.connection_properties)
        self.assertEqual(True,
                         HuaweiStorHyperConnectorTestCase.attached)
        self.assertRaises(exception.BrickException,
                          self.connector_nocli.disconnect_volume,
                          self.connection_properties,
                          self.device_info)
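        # disconnect_volume raised before 'detach' ran (iscliexist is False
        # on connector_nocli), so only the attach/querydev commands from the
        # earlier connect_volume are expected below.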
        expected_commands = [self.fake_sdscli_file + ' -c attach'
                             ' -v volume-b2911673-863c-4380-a5f2-e1729eecfe3f',
                             self.fake_sdscli_file + ' -c querydev'
                             ' -v volume-b2911673-863c-4380-a5f2-e1729eecfe3f']
        self.assertEqual(expected_commands, self.cmds)

    def test_extend_volume(self):
        self.assertRaises(NotImplementedError,
                          self.connector.extend_volume,
                          self.connection_properties)


os_brick-6.11.0/os_brick/tests/initiator/connectors/test_iscsi.py

# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import os
from unittest import mock

import ddt
from oslo_concurrency import processutils as putils

from os_brick import exception
from os_brick.initiator.connectors import base
from os_brick.initiator.connectors import iscsi
from os_brick.initiator import linuxscsi
from os_brick.initiator import utils
from os_brick.privileged import rootwrap as priv_rootwrap
from os_brick.tests.initiator import test_connector


@ddt.ddt
class ISCSIConnectorTestCase(test_connector.ConnectorTestCase):

    SINGLE_CON_PROPS = {'volume_id': 'vol_id',
                        'target_portal': 'ip1:port1',
                        'target_iqn': 'tgt1',
                        'encryption': False,
                        'target_lun': '1'}
    CON_PROPS = {
        'volume_id': 'vol_id',
        'target_portal': 'ip1:port1',
        'target_iqn': 'tgt1',
        'target_lun': 4,
        'target_portals': ['ip1:port1', 'ip2:port2', 'ip3:port3',
                           'ip4:port4'],
        'target_iqns': ['tgt1', 'tgt2', 'tgt3', 'tgt4'],
        'target_luns': [4, 5, 6, 7],
    }

    def setUp(self):
        super(ISCSIConnectorTestCase, self).setUp()
        self.connector = iscsi.ISCSIConnector(
            None, execute=self.fake_execute, use_multipath=False)
        self.connector_with_multipath = iscsi.ISCSIConnector(
            None, execute=self.fake_execute, use_multipath=True)

        self.mock_object(self.connector._linuxscsi, 'get_name_from_path',
                         return_value="/dev/sdb")
        self._fake_iqn = 'iqn.1234-56.foo.bar:01:23456789abc'
        self._name = 'volume-00000001'
        self._iqn = 'iqn.2010-10.org.openstack:%s' % self._name
        self._location = '10.0.2.15:3260'
        self._lun = 1

    @mock.patch.object(iscsi.ISCSIConnector, '_run_iscsi_session')
    def test_get_iscsi_sessions_full(self, sessions_mock):
        iscsiadm_result = ('tcp: [session1] ip1:port1,1 tgt1 (non-flash)\n'
                           'tcp: [session2] ip2:port2,-1 tgt2 (non-flash)\n'
                           'tcp: [session3] ip3:port3,1 tgt3\n')
        sessions_mock.return_value = (iscsiadm_result, '')
        res = self.connector._get_iscsi_sessions_full()
        expected = [('tcp:', 'session1', 'ip1:port1', '1', 'tgt1'),
                    ('tcp:', 'session2', 'ip2:port2', '-1', 'tgt2'),
                    ('tcp:', 'session3', 'ip3:port3', '1', 'tgt3')]
        self.assertListEqual(expected, res)

    @mock.patch.object(iscsi.ISCSIConnector, '_run_iscsi_session')
    def test_get_iscsi_sessions_full_stderr(self, sessions_mock):
        iscsiadm_result = ('tcp: [session1] ip1:port1,1 tgt1 (non-flash)\n'
                           'tcp: [session2] ip2:port2,-1 tgt2 (non-flash)\n'
                           'tcp: [session3] ip3:port3,1 tgt3\n')
        sessions_mock.return_value = (iscsiadm_result, 'error')
        res = self.connector._get_iscsi_sessions_full()
        expected = [('tcp:', 'session1', 'ip1:port1', '1', 'tgt1'),
                    ('tcp:', 'session2', 'ip2:port2', '-1', 'tgt2'),
                    ('tcp:', 'session3', 'ip3:port3', '1', 'tgt3')]
        self.assertListEqual(expected, res)

    @mock.patch.object(iscsi.ISCSIConnector, '_get_iscsi_sessions_full')
    def test_get_iscsi_sessions(self, sessions_mock):
        sessions_mock.return_value = [
            ('tcp:', 'session1', 'ip1:port1', '1', 'tgt1'),
            ('tcp:', 'session2', 'ip2:port2', '-1', 'tgt2'),
            ('tcp:', 'session3', 'ip3:port3', '1', 'tgt3')]
        res = self.connector._get_iscsi_sessions()
        expected = ['ip1:port1', 'ip2:port2', 'ip3:port3']
        self.assertListEqual(expected, res)

    @mock.patch.object(iscsi.ISCSIConnector, '_get_iscsi_sessions_full',
                       return_value=[])
    def test_get_iscsi_sessions_no_sessions(self, sessions_mock):
        res = self.connector._get_iscsi_sessions()
        self.assertListEqual([], res)
        sessions_mock.assert_called()

    @mock.patch.object(iscsi.ISCSIConnector, '_execute')
    def test_get_iscsi_nodes(self, exec_mock):
        iscsiadm_result = ('ip1:port1,1 tgt1\nip2:port2,-1 tgt2\n'
                           'ip3:port3,1 tgt3\n')
        exec_mock.return_value = (iscsiadm_result, '')
        res = self.connector._get_iscsi_nodes()
        expected = [('ip1:port1', 'tgt1'), ('ip2:port2', 'tgt2'),
                    ('ip3:port3', 'tgt3')]
        self.assertListEqual(expected, res)
        exec_mock.assert_called_once_with(
            'iscsiadm', '-m', 'node', run_as_root=True,
            root_helper=self.connector._root_helper, check_exit_code=False)

    @mock.patch.object(iscsi.ISCSIConnector, '_execute')
    def test_get_iscsi_nodes_error(self, exec_mock):
        exec_mock.return_value = (None, 'error')
        res = self.connector._get_iscsi_nodes()
        self.assertEqual([], res)

    @mock.patch.object(iscsi.ISCSIConnector, '_execute')
    def test_get_iscsi_nodes_corrupt(self, exec_mock):
        iscsiadm_result = ('ip1:port1,-1 tgt1\n'
                           'ip2:port2,-1 tgt2\n'
                           '[]:port3,-1\n'
                           'ip4:port4,-1 tgt4\n')
        exec_mock.return_value = (iscsiadm_result, '')
        res = self.connector._get_iscsi_nodes()
        expected = [('ip1:port1', 'tgt1'), ('ip2:port2', 'tgt2'),
                    ('ip4:port4', 'tgt4')]
        self.assertListEqual(expected, res)
        exec_mock.assert_called_once_with(
            'iscsiadm', '-m', 'node', run_as_root=True,
            root_helper=self.connector._root_helper, check_exit_code=False)

    @mock.patch.object(iscsi.ISCSIConnector, '_get_ips_iqns_luns')
    @mock.patch('glob.glob')
    @mock.patch.object(iscsi.ISCSIConnector, '_get_iscsi_sessions_full')
    @mock.patch.object(iscsi.ISCSIConnector, '_get_iscsi_nodes')
    def test_get_connection_devices(self, nodes_mock, sessions_mock,
                                    glob_mock, iql_mock):
        iql_mock.return_value = self.connector._get_all_targets(
            self.CON_PROPS)

        # List sessions from other targets and non tcp sessions
        sessions_mock.return_value = [
            ('non-tcp:', '0', 'ip1:port1', '1', 'tgt1'),
            ('tcp:', '1', 'ip1:port1', '1', 'tgt1'),
            ('tcp:', '2', 'ip2:port2', '-1', 'tgt2'),
            ('tcp:', '3', 'ip1:port1', '1', 'tgt4'),
            ('tcp:', '4', 'ip2:port2', '-1', 'tgt5')]
        # List 1 node without sessions
        nodes_mock.return_value = [('ip1:port1', 'tgt1'),
                                   ('ip2:port2', 'tgt2'),
                                   ('ip3:port3', 'tgt3')]
        sys_cls = '/sys/class/scsi_host/host'
        glob_mock.side_effect = [
            [sys_cls + '1/device/session1/target6/1:2:6:4/block/sda',
             sys_cls + '1/device/session1/target6/1:2:6:4/block/sda1'],
            [sys_cls + '2/device/session2/target7/2:2:7:5/block/sdb',
             sys_cls + '2/device/session2/target7/2:2:7:4/block/sdc'],
        ]
        res = self.connector._get_connection_devices(self.CON_PROPS)
        expected = {('ip1:port1', 'tgt1'): ({'sda'}, set()),
                    ('ip2:port2', 'tgt2'): ({'sdb'}, {'sdc'}),
                    ('ip3:port3', 'tgt3'): (set(), set())}
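        # Each key is an (ip, iqn) connection; the first set holds devices
        # that belong to this connection, the second set devices seen on the
        # same session but for other LUNs (sdc above), which is what keeps a
        # shared session from being logged out.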
        self.assertDictEqual(expected, res)
        iql_mock.assert_called_once_with(self.CON_PROPS, discover=False,
                                         is_disconnect_call=False)

    @mock.patch('glob.glob')
    @mock.patch.object(iscsi.ISCSIConnector, '_get_iscsi_sessions_full')
    @mock.patch.object(iscsi.ISCSIConnector, '_get_iscsi_nodes')
    def test_get_connection_devices_with_iqns(self, nodes_mock,
                                              sessions_mock, glob_mock):
        ips_iqns_luns = self.connector._get_all_targets(self.CON_PROPS)

        # List sessions from other targets and non tcp sessions
        sessions_mock.return_value = [
            ('non-tcp:', '0', 'ip1:port1', '1', 'tgt1'),
            ('tcp:', '1', 'ip1:port1', '1', 'tgt1'),
            ('tcp:', '2', 'ip2:port2', '-1', 'tgt2'),
            ('tcp:', '3', 'ip1:port1', '1', 'tgt4'),
            ('tcp:', '4', 'ip2:port2', '-1', 'tgt5')]
        # List 1 node without sessions
        nodes_mock.return_value = [('ip1:port1', 'tgt1'),
                                   ('ip2:port2', 'tgt2'),
                                   ('ip3:port3', 'tgt3')]
        sys_cls = '/sys/class/scsi_host/host'
        glob_mock.side_effect = [
            [sys_cls + '1/device/session1/target6/1:2:6:4/block/sda',
             sys_cls + '1/device/session1/target6/1:2:6:4/block/sda1'],
            [sys_cls + '2/device/session2/target7/2:2:7:5/block/sdb',
             sys_cls + '2/device/session2/target7/2:2:7:4/block/sdc'],
        ]
        with mock.patch.object(iscsi.ISCSIConnector,
                               '_get_all_targets') as get_targets_mock:
            res = self.connector._get_connection_devices(mock.sentinel.props,
                                                         ips_iqns_luns)
        expected = {('ip1:port1', 'tgt1'): ({'sda'}, set()),
                    ('ip2:port2', 'tgt2'): ({'sdb'}, {'sdc'}),
                    ('ip3:port3', 'tgt3'): (set(), set())}
        self.assertDictEqual(expected, res)
        get_targets_mock.assert_not_called()

    def generate_device(self, location, iqn, transport=None, lun=1):
        dev_format = "ip-%s-iscsi-%s-lun-%s" % (location, iqn, lun)
        if transport:
            dev_format = "pci-0000:00:00.0-" + dev_format
        fake_dev_path = "/dev/disk/by-path/" + dev_format
        return fake_dev_path

    def iscsi_connection(self, volume, location, iqn):
        return {
            'driver_volume_type': 'iscsi',
            'data': {
                'volume_id': volume['id'],
                'target_portal': location,
                'target_iqn': iqn,
                'target_lun': 1,
            }
        }

    def iscsi_connection_multipath(self, volume, locations, iqns, luns):
        return {
            'driver_volume_type': 'iscsi',
            'data': {
                'volume_id': volume['id'],
                'target_portals': locations,
                'target_iqns': iqns,
                'target_luns': luns,
            }
        }

    def iscsi_connection_chap(self, volume, location, iqn, auth_method,
                              auth_username, auth_password,
                              discovery_auth_method, discovery_auth_username,
                              discovery_auth_password):
        return {
            'driver_volume_type': 'iscsi',
            'data': {
                'auth_method': auth_method,
                'auth_username': auth_username,
                'auth_password': auth_password,
                'discovery_auth_method': discovery_auth_method,
                'discovery_auth_username': discovery_auth_username,
                'discovery_auth_password': discovery_auth_password,
                'target_lun': 1,
                'volume_id': volume['id'],
                'target_iqn': iqn,
                'target_portal': location,
            }
        }

    def _initiator_get_text(self, *arg, **kwargs):
        text = ('## DO NOT EDIT OR REMOVE THIS FILE!\n'
                '## If you remove this file, the iSCSI daemon '
                'will not start.\n'
                '## If you change the InitiatorName, existing '
                'access control lists\n'
                '## may reject this initiator. The InitiatorName must '
                'be unique\n'
                '## for each iSCSI initiator. Do NOT duplicate iSCSI '
                'InitiatorNames.\n'
                'InitiatorName=%s' % self._fake_iqn)
        return text, None

    def test_get_initiator(self):
        def initiator_no_file(*args, **kwargs):
            raise putils.ProcessExecutionError('No file')

        self.connector._execute = initiator_no_file
        initiator = self.connector.get_initiator()
        self.assertIsNone(initiator)
        self.connector._execute = self._initiator_get_text
        initiator = self.connector.get_initiator()
        self.assertEqual(initiator, self._fake_iqn)

    def test_get_connector_properties(self):
        with mock.patch.object(priv_rootwrap, 'execute') as mock_exec:
            mock_exec.return_value = self._initiator_get_text()
            multipath = True
            enforce_multipath = True
            props = iscsi.ISCSIConnector.get_connector_properties(
                'sudo', multipath=multipath,
                enforce_multipath=enforce_multipath)

            expected_props = {'initiator': self._fake_iqn}
            self.assertEqual(expected_props, props)

    @mock.patch.object(iscsi.ISCSIConnector, '_run_iscsiadm_bare')
    def test_brick_iscsi_validate_transport(self, mock_iscsiadm):
        sample_output = ('# BEGIN RECORD 2.0-872\n'
                         'iface.iscsi_ifacename = %s.fake_suffix\n'
                         'iface.net_ifacename = \n'
                         'iface.ipaddress = \n'
                         'iface.hwaddress = 00:53:00:00:53:00\n'
                         'iface.transport_name = %s\n'
                         'iface.initiatorname = \n'
                         '# END RECORD')
        for tport in self.connector.supported_transports:
            mock_iscsiadm.return_value = (sample_output % (tport, tport), '')
            self.assertEqual(tport + '.fake_suffix',
                             self.connector._validate_iface_transport(
                                 tport + '.fake_suffix'))

        mock_iscsiadm.return_value = ("", 'iscsiadm: Could not '
                                          'read iface fake_transport (6)')
        self.assertEqual('default',
                         self.connector._validate_iface_transport(
                             'fake_transport'))

    def test_get_search_path(self):
        search_path = self.connector.get_search_path()
        expected = "/dev/disk/by-path"
        self.assertEqual(expected, search_path)

    @mock.patch.object(os.path, 'exists', return_value=True)
    @mock.patch.object(iscsi.ISCSIConnector, '_get_potential_volume_paths')
    def test_get_volume_paths(self, mock_potential_paths, mock_exists):
        name1 = 'volume-00000001-1'
        vol = {'id': 1, 'name': name1}
        location = '10.0.2.15:3260'
        iqn = 'iqn.2010-10.org.openstack:%s' % name1

        fake_path = ("/dev/disk/by-path/ip-%(ip)s-iscsi-%(iqn)s-lun-%(lun)s" %
                     {'ip': '10.0.2.15', 'iqn': iqn, 'lun': 1})
        fake_devices = [fake_path]
        expected = fake_devices
        mock_potential_paths.return_value = fake_devices

        connection_properties = self.iscsi_connection(vol, [location],
                                                      [iqn])
        volume_paths = self.connector.get_volume_paths(
            connection_properties['data'])
        self.assertEqual(expected, volume_paths)

    @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device_path')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device')
    def test_discover_mpath_device(self, mock_multipath_device,
                                   mock_multipath_device_path):
        location1 = '10.0.2.15:3260'
        location2 = '[2001:db8::1]:3260'
        name1 = 'volume-00000001-1'
        name2 = 'volume-00000001-2'
        iqn1 = 'iqn.2010-10.org.openstack:%s' % name1
        iqn2 = 'iqn.2010-10.org.openstack:%s' % name2
        fake_multipath_dev = '/dev/mapper/fake-multipath-dev'
        fake_raw_dev = '/dev/disk/by-path/fake-raw-lun'
        vol = {'id': 1, 'name': name1}
        connection_properties = self.iscsi_connection_multipath(
            vol, [location1, location2], [iqn1, iqn2], [1, 2])
        mock_multipath_device_path.return_value = fake_multipath_dev
        mock_multipath_device.return_value = test_connector.FAKE_SCSI_WWN
        (result_path, result_mpath_id) = (
            self.connector_with_multipath._discover_mpath_device(
                test_connector.FAKE_SCSI_WWN,
                connection_properties['data'],
                fake_raw_dev))
        result = {'path': result_path,
                  'multipath_id': result_mpath_id}
        expected_result = {'path': fake_multipath_dev,
                           'multipath_id': test_connector.FAKE_SCSI_WWN}
        self.assertEqual(expected_result, result)

    @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device_path')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device')
    @mock.patch.object(os.path, 'realpath')
    def test_discover_mpath_device_by_realpath(self, mock_realpath,
                                               mock_multipath_device,
                                               mock_multipath_device_path):
        FAKE_SCSI_WWN = '1234567890'
        location1 = '10.0.2.15:3260'
        location2 = '[2001:db8::1]:3260'
        name1 = 'volume-00000001-1'
        name2 = 'volume-00000001-2'
        iqn1 = 'iqn.2010-10.org.openstack:%s' % name1
        iqn2 = 'iqn.2010-10.org.openstack:%s' % name2
        fake_multipath_dev = None
        fake_raw_dev = '/dev/disk/by-path/fake-raw-lun'
        vol = {'id': 1, 'name': name1}
        connection_properties = self.iscsi_connection_multipath(
            vol, [location1, location2], [iqn1, iqn2], [1, 2])
        mock_multipath_device_path.return_value = fake_multipath_dev
        mock_multipath_device.return_value = {
            'device': '/dev/mapper/%s' % FAKE_SCSI_WWN}
        mock_realpath.return_value = '/dev/sdvc'
        (result_path, result_mpath_id) = (
            self.connector_with_multipath._discover_mpath_device(
                FAKE_SCSI_WWN,
                connection_properties['data'],
                fake_raw_dev))
        mock_multipath_device.assert_called_with('/dev/sdvc')
        result = {'path': result_path, 'multipath_id': result_mpath_id}
        expected_result = {'path': '/dev/mapper/%s' % FAKE_SCSI_WWN,
                           'multipath_id': FAKE_SCSI_WWN}
        self.assertEqual(expected_result, result)

    @mock.patch.object(
        base.BaseLinuxConnector, 'check_multipath', mock.MagicMock())
    @mock.patch.object(iscsi.ISCSIConnector, '_cleanup_connection')
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_multipath_volume')
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_single_volume')
    def test_connect_volume_mp(self, con_single_mock, con_mp_mock,
                               clean_mock):
        self.connector.use_multipath = True
        res = self.connector.connect_volume(self.CON_PROPS)
        self.assertEqual(con_mp_mock.return_value, res)
        con_single_mock.assert_not_called()
        con_mp_mock.assert_called_once_with(self.CON_PROPS)
        clean_mock.assert_not_called()

    @mock.patch.object(
        base.BaseLinuxConnector, 'check_multipath', mock.MagicMock())
    @mock.patch.object(iscsi.ISCSIConnector, '_cleanup_connection')
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_multipath_volume')
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_single_volume')
    def test_connect_volume_mp_failure(self, con_single_mock, con_mp_mock,
                                       clean_mock):
        self.connector.use_multipath = True
        con_mp_mock.side_effect = exception.BrickException
        self.assertRaises(exception.BrickException,
                          self.connector.connect_volume, self.CON_PROPS)
        con_single_mock.assert_not_called()
        con_mp_mock.assert_called_once_with(self.CON_PROPS)
        clean_mock.assert_called_once_with(self.CON_PROPS, force=True)

    @mock.patch.object(
        base.BaseLinuxConnector, 'check_multipath', mock.MagicMock())
    @mock.patch.object(iscsi.ISCSIConnector, '_cleanup_connection')
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_multipath_volume')
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_single_volume')
    def test_connect_volume_sp(self, con_single_mock, con_mp_mock,
                               clean_mock):
        self.connector.use_multipath = False
        res = self.connector.connect_volume(self.CON_PROPS)
        self.assertEqual(con_single_mock.return_value, res)
        con_mp_mock.assert_not_called()
        con_single_mock.assert_called_once_with(self.CON_PROPS)
        clean_mock.assert_not_called()

    @mock.patch.object(
        base.BaseLinuxConnector, 'check_multipath', mock.MagicMock())
    @mock.patch.object(iscsi.ISCSIConnector, '_cleanup_connection')
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_multipath_volume')
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_single_volume')
    def test_connect_volume_sp_failure(self, con_single_mock, con_mp_mock,
                                       clean_mock):
        self.connector.use_multipath = False
        con_single_mock.side_effect = exception.BrickException
        self.assertRaises(exception.BrickException,
                          self.connector.connect_volume, self.CON_PROPS)
        con_mp_mock.assert_not_called()
        con_single_mock.assert_called_once_with(self.CON_PROPS)
        clean_mock.assert_called_once_with(self.CON_PROPS, force=True)

    def test_discover_iscsi_portals(self):
        location = '10.0.2.15:3260'
        name = 'volume-00000001'
        iqn = 'iqn.2010-10.org.openstack:%s' % name
        vol = {'id': 1, 'name': name}
        auth_method = 'CHAP'
        auth_username = 'fake_chap_username'
        auth_password = 'fake_chap_password'
        discovery_auth_method = 'CHAP'
        discovery_auth_username = 'fake_chap_username'
        discovery_auth_password = 'fake_chap_password'
        connection_properties = self.iscsi_connection_chap(
            vol, location, iqn, auth_method, auth_username, auth_password,
            discovery_auth_method, discovery_auth_username,
            discovery_auth_password)
        self.connector_with_multipath = iscsi.ISCSIConnector(
            None, execute=self.fake_execute, use_multipath=True)

        for transport in ['default', 'iser', 'badTransport']:
            interface = 'iser' if transport == 'iser' else 'default'
            self.mock_object(self.connector_with_multipath, '_get_transport',
                             mock.Mock(return_value=interface))

            self.connector_with_multipath._discover_iscsi_portals(
                connection_properties['data'])

            expected_cmds = [
                'iscsiadm -m discoverydb -t sendtargets -I %(iface)s '
                '-p %(location)s --op update '
                '-n discovery.sendtargets.auth.authmethod -v %(auth_method)s '
                '-n discovery.sendtargets.auth.username -v %(username)s '
                '-n discovery.sendtargets.auth.password -v %(password)s' %
                {'iface': interface, 'location': location,
                 'auth_method': discovery_auth_method,
                 'username': discovery_auth_username,
                 'password': discovery_auth_password},
                'iscsiadm -m node --op show -p %s' % location,
                'iscsiadm -m discoverydb -t sendtargets -I %(iface)s'
                ' -p %(location)s --discover' % {'iface': interface,
                                                 'location': location},
                'iscsiadm -m node --op show -p %s' % location]
            self.assertEqual(expected_cmds, self.cmds)
            # Reset to run with a different transport type
            self.cmds = list()

    @mock.patch.object(priv_rootwrap, 'execute', return_value=('', ''))
    @mock.patch.object(iscsi.ISCSIConnector,
                       '_run_iscsiadm_update_discoverydb')
    @mock.patch.object(os.path, 'exists', return_value=True)
    def test_iscsi_portals_with_chap_discovery(
            self, exists, update_discoverydb, mock_exec):
        location = '10.0.2.15:3260'
        name = 'volume-00000001'
        iqn = 'iqn.2010-10.org.openstack:%s' % name
        vol = {'id': 1, 'name': name}
        auth_method = 'CHAP'
        auth_username = 'fake_chap_username'
        auth_password = 'fake_chap_password'
        discovery_auth_method = 'CHAP'
        discovery_auth_username = 'fake_chap_username'
        discovery_auth_password = 'fake_chap_password'
        connection_properties = self.iscsi_connection_chap(
            vol, location, iqn, auth_method, auth_username, auth_password,
            discovery_auth_method, discovery_auth_username,
            discovery_auth_password)
        self.connector_with_multipath = iscsi.ISCSIConnector(
            None, execute=self.fake_execute, use_multipath=True)
        self.cmds = []
        # The first call returns an error code = 6, mocking an empty
        # discovery db. The second one mocks a successful return and the
        # third one a dummy exit code, which will trigger the
        # TargetPortalNotFound exception in connect_volume
        update_discoverydb.side_effect = [
            putils.ProcessExecutionError(None, None, 6),
            ("", ""),
            putils.ProcessExecutionError(None, None, 9)]

        self.connector_with_multipath._discover_iscsi_portals(
            connection_properties['data'])
        update_discoverydb.assert_called_with(connection_properties['data'])

        expected_cmds = [
            'iscsiadm -m discoverydb -t sendtargets -p %s -I default'
            ' --op new' % location,
            'iscsiadm -m node --op show -p %s' % location,
            'iscsiadm -m discoverydb -t sendtargets -I default -p %s'
            ' --discover' % location,
            'iscsiadm -m node --op show -p %s' % location]
        self.assertEqual(expected_cmds, self.cmds)

        self.assertRaises(exception.TargetPortalNotFound,
                          self.connector_with_multipath.connect_volume,
                          connection_properties['data'])

    def test_get_target_portals_from_iscsiadm_output(self):
        connector = self.connector
        test_output = '''10.15.84.19:3260,1 iqn.1992-08.com.netapp:sn.33615311
10.15.85.19:3260,2 iqn.1992-08.com.netapp:sn.33615311
'''
        res = connector._get_target_portals_from_iscsiadm_output(test_output)
        ips = ['10.15.84.19:3260', '10.15.85.19:3260']
        iqns = ['iqn.1992-08.com.netapp:sn.33615311',
                'iqn.1992-08.com.netapp:sn.33615311']
        expected = (ips, iqns)
        self.assertEqual(expected, res)

    @mock.patch.object(iscsi.ISCSIConnector, '_cleanup_connection')
    def test_disconnect_volume(self, cleanup_mock):
        conn_props = {'target_portal': '198.72.124.185:3260',
                      'target_iqn': 'iqn.2010-10.org.openstack:volume-uuid',
                      'target_lun': 0,
                      'device_path': '/dev/sda'}
        res = self.connector.disconnect_volume(conn_props,
                                               mock.sentinel.dev_info,
                                               mock.sentinel.Force,
                                               mock.sentinel.ignore_errors)
        self.assertEqual(cleanup_mock.return_value, res)
        cleanup_mock.assert_called_once_with(
            conn_props,
            force=mock.sentinel.Force,
            ignore_errors=mock.sentinel.ignore_errors,
            device_info=mock.sentinel.dev_info,
            is_disconnect_call=True)

    @ddt.data(True, False)
    @mock.patch.object(iscsi.ISCSIConnector, '_get_transport')
    @mock.patch.object(iscsi.ISCSIConnector, '_run_iscsiadm_bare')
    def test_get_discoverydb_portals(self, is_iser, iscsiadm_mock,
                                     transport_mock):
        params = {
            'iqn1': self.SINGLE_CON_PROPS['target_iqn'],
            'iqn2': 'iqn.2004-04.com.qnap:ts-831x:iscsi.cinder-2017.9ef',
            'addr': self.SINGLE_CON_PROPS['target_portal'].replace(':', ','),
            'ip1': self.SINGLE_CON_PROPS['target_portal'],
            'ip2': '192.168.1.3:3260',
            'transport': 'iser' if is_iser else 'default',
            'other_transport': 'default' if is_iser else 'iser',
        }
        iscsiadm_mock.return_value = (
            'SENDTARGETS:\n'
            'DiscoveryAddress: 192.168.1.33,3260\n'
            'DiscoveryAddress: %(addr)s\n'
            'Target: %(iqn1)s\n'
            '    Portal: %(ip2)s,1\n'
            '        Iface Name: %(transport)s\n'
            '    Portal: %(ip1)s,1\n'
            '        Iface Name: %(transport)s\n'
            '    Portal: %(ip1)s,1\n'
            '        Iface Name: %(other_transport)s\n'
            'Target: %(iqn2)s\n'
            '    Portal: %(ip2)s,1\n'
            '        Iface Name: %(transport)s\n'
            '    Portal: %(ip1)s,1\n'
            '        Iface Name: %(transport)s\n'
            'DiscoveryAddress: 192.168.1.38,3260\n'
            'iSNS:\n'
            'No targets found.\n'
            'STATIC:\n'
            'No targets found.\n'
            'FIRMWARE:\n'
            'No targets found.\n' % params,
            None)
        transport_mock.return_value = 'iser' if is_iser else 'non-iser'
        res = self.connector._get_discoverydb_portals(self.SINGLE_CON_PROPS)
        expected = [(params['ip2'], params['iqn1'],
                     self.SINGLE_CON_PROPS['target_lun']),
                    (params['ip1'], params['iqn1'],
                     self.SINGLE_CON_PROPS['target_lun']),
                    (params['ip2'], params['iqn2'],
                     self.SINGLE_CON_PROPS['target_lun']),
                    (params['ip1'], params['iqn2'],
                     self.SINGLE_CON_PROPS['target_lun'])]
        self.assertListEqual(expected, res)
        iscsiadm_mock.assert_called_once_with(
            ['-m', 'discoverydb', '-o', 'show', '-P', 1])
        transport_mock.assert_called_once_with()

    @mock.patch.object(iscsi.ISCSIConnector, '_get_transport',
                       return_value='')
    @mock.patch.object(iscsi.ISCSIConnector, '_run_iscsiadm_bare')
    def test_get_discoverydb_portals_error(self, iscsiadm_mock,
                                           transport_mock):
        """DiscoveryAddress is not present."""
        iscsiadm_mock.return_value = (
            'SENDTARGETS:\n'
            'DiscoveryAddress: 192.168.1.33,3260\n'
            'DiscoveryAddress: 192.168.1.38,3260\n'
            'iSNS:\n'
            'No targets found.\n'
            'STATIC:\n'
            'No targets found.\n'
            'FIRMWARE:\n'
            'No targets found.\n',
            None)
        self.assertRaises(exception.TargetPortalsNotFound,
                          self.connector._get_discoverydb_portals,
                          self.SINGLE_CON_PROPS)
        iscsiadm_mock.assert_called_once_with(
            ['-m', 'discoverydb', '-o', 'show', '-P', 1])
        transport_mock.assert_not_called()

    @mock.patch.object(iscsi.ISCSIConnector, '_get_transport',
                       return_value='')
    @mock.patch.object(iscsi.ISCSIConnector, '_run_iscsiadm_bare')
    def test_get_discoverydb_portals_error_is_present(self, iscsiadm_mock,
                                                      transport_mock):
        """DiscoveryAddress is present but wrong interface."""
        params = {
            'iqn': self.SINGLE_CON_PROPS['target_iqn'],
            'addr': self.SINGLE_CON_PROPS['target_portal'].replace(':', ','),
            'ip': self.SINGLE_CON_PROPS['target_portal'],
        }
        iscsiadm_mock.return_value = (
            'SENDTARGETS:\n'
            'DiscoveryAddress: 192.168.1.33,3260\n'
            'DiscoveryAddress: %(addr)s\n'
            'Target: %(iqn)s\n'
            '    Portal: %(ip)s,1\n'
            '        Iface Name: iser\n'
            'DiscoveryAddress: 192.168.1.38,3260\n'
            'iSNS:\n'
            'No targets found.\n'
            'STATIC:\n'
            'No targets found.\n'
            'FIRMWARE:\n'
            'No targets found.\n' % params,
            None)
        self.assertRaises(exception.TargetPortalsNotFound,
                          self.connector._get_discoverydb_portals,
                          self.SINGLE_CON_PROPS)
        iscsiadm_mock.assert_called_once_with(
            ['-m', 'discoverydb', '-o', 'show', '-P', 1])
        transport_mock.assert_called_once_with()

    @ddt.data(('/dev/sda', False),
              ('/dev/disk/by-id/scsi-WWID', False),
              ('/dev/dm-11', True),
              ('/dev/disk/by-id/dm-uuid-mpath-MPATH', True))
    @ddt.unpack
    @mock.patch('os_brick.utils.get_dev_path')
    @mock.patch.object(iscsi.ISCSIConnector, '_disconnect_connection')
    @mock.patch.object(iscsi.ISCSIConnector, '_get_connection_devices')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'flush_multipath_device')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'remove_connection',
                       return_value=None)
    def test_cleanup_connection(self, path_used, was_multipath,
                                remove_mock, flush_mock, con_devs_mock,
                                discon_mock, get_dev_path_mock):
        get_dev_path_mock.return_value = path_used
        # Return an ordered dict instead of a normal dict for
        # discon_mock.assert
        con_devs_mock.return_value = collections.OrderedDict((
            (('ip1:port1', 'tgt1'), ({'sda'}, set())),
            (('ip2:port2', 'tgt2'), ({'sdb'}, {'sdc'})),
            (('ip3:port3', 'tgt3'), (set(), set()))))

        self.connector._cleanup_connection(
            self.CON_PROPS, ips_iqns_luns=mock.sentinel.ips_iqns_luns,
            force=False, ignore_errors=False,
            device_info=mock.sentinel.device_info)

        get_dev_path_mock.assert_called_once_with(self.CON_PROPS,
                                                  mock.sentinel.device_info)
        con_devs_mock.assert_called_once_with(self.CON_PROPS,
                                              mock.sentinel.ips_iqns_luns,
                                              False)
        remove_mock.assert_called_once_with({'sda', 'sdb'}, False, mock.ANY,
                                            path_used, was_multipath)
        discon_mock.assert_called_once_with(
            self.CON_PROPS,
            [('ip1:port1', 'tgt1'), ('ip3:port3', 'tgt3')],
            False, mock.ANY)
        flush_mock.assert_not_called()

    @mock.patch('os_brick.exception.ExceptionChainer.__nonzero__',
                mock.Mock(return_value=True))
    @mock.patch('os_brick.exception.ExceptionChainer.__bool__',
                mock.Mock(return_value=True))
    @mock.patch.object(iscsi.ISCSIConnector, '_disconnect_connection')
    @mock.patch.object(iscsi.ISCSIConnector, '_get_connection_devices')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'remove_connection',
                       return_value=mock.sentinel.mp_name)
    @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_del_map')
    def test_cleanup_connection_force_failure(self, remove_map_mock,
                                              remove_mock, con_devs_mock,
                                              discon_mock):
        # Return an ordered dict instead of a normal dict for
        # discon_mock.assert
        con_devs_mock.return_value = collections.OrderedDict((
            (('ip1:port1', 'tgt1'), ({'sda'}, set())),
            (('ip2:port2', 'tgt2'), ({'sdb'}, {'sdc'})),
            (('ip3:port3', 'tgt3'), (set(), set()))))

        self.assertRaises(exception.ExceptionChainer,
                          self.connector._cleanup_connection,
                          self.CON_PROPS,
                          ips_iqns_luns=mock.sentinel.ips_iqns_luns,
                          force=mock.sentinel.force, ignore_errors=False)

        con_devs_mock.assert_called_once_with(self.CON_PROPS,
                                              mock.sentinel.ips_iqns_luns,
                                              False)
        remove_mock.assert_called_once_with({'sda', 'sdb'},
                                            mock.sentinel.force, mock.ANY,
                                            '', False)
        discon_mock.assert_called_once_with(
            self.CON_PROPS,
            [('ip1:port1', 'tgt1'), ('ip3:port3', 'tgt3')],
            mock.sentinel.force, mock.ANY)
        remove_map_mock.assert_called_once_with(mock.sentinel.mp_name)

    def test_cleanup_connection_no_data_discoverydb(self):
        self.connector.use_multipath = True
        with mock.patch.object(self.connector, '_get_discoverydb_portals',
                               side_effect=exception.TargetPortalsNotFound), \
                mock.patch.object(self.connector._linuxscsi,
                                  'remove_connection') as mock_remove:
            # This will not raise an exception
            self.connector._cleanup_connection(self.SINGLE_CON_PROPS)

            mock_remove.assert_not_called()

    @ddt.data({'do_raise': False, 'force': False},
              {'do_raise': True, 'force': True},
              {'do_raise': True, 'force': False})
    @ddt.unpack
    @mock.patch.object(iscsi.ISCSIConnector, '_disconnect_from_iscsi_portal')
    def test_disconnect_connection(self, disconnect_mock, do_raise, force):
        will_raise = do_raise and not force
        actual_call_args = []

        # Since we reuse the copied dictionary on _disconnect_connection
        # changing its values we cannot use mock's assert_has_calls
        def my_disconnect(con_props):
            actual_call_args.append(con_props.copy())
            if do_raise:
                raise exception.ExceptionChainer()

        disconnect_mock.side_effect = my_disconnect

        connections = (('ip1:port1', 'tgt1'), ('ip2:port2', 'tgt2'))
        original_props = self.CON_PROPS.copy()
        exc = exception.ExceptionChainer()
        if will_raise:
            self.assertRaises(exception.ExceptionChainer,
                              self.connector._disconnect_connection,
                              self.CON_PROPS, connections,
                              force=force, exc=exc)
        else:
            self.connector._disconnect_connection(self.CON_PROPS,
                                                  connections,
                                                  force=force, exc=exc)

        # Passed properties should not be altered by the method call
        self.assertDictEqual(original_props, self.CON_PROPS)
        expected = [original_props.copy(), original_props.copy()]
        for i, (ip, iqn) in enumerate(connections):
            expected[i].update(target_portal=ip, target_iqn=iqn)
        # If we are failing and not forcing we won't make all the calls
        if will_raise:
            expected = expected[:1]
        self.assertListEqual(expected, actual_call_args)
        # No exceptions have been caught by ExceptionChainer context manager
        self.assertEqual(do_raise, bool(exc))

    def test_disconnect_from_iscsi_portal(self):
        self.connector._disconnect_from_iscsi_portal(self.CON_PROPS)
        expected_prefix = ('iscsiadm -m node -T %s -p %s ' %
                           (self.CON_PROPS['target_iqn'],
                            self.CON_PROPS['target_portal']))
        expected = [
            expected_prefix + '--op update -n node.startup -v manual',
            expected_prefix + '--logout',
            expected_prefix + '--op delete',
        ]
        self.assertListEqual(expected, self.cmds)

    def test_iscsiadm_discover_parsing(self):
        # Ensure that parsing iscsiadm discover ignores cruft.

        ips = ["192.168.204.82:3260", "192.168.204.82:3261"]
        iqns = ["iqn.2010-10.org.openstack:volume-"
                "f9b12623-6ce3-4dac-a71f-09ad4249bdd3",
                "iqn.2010-10.org.openstack:volume-"
                "f9b12623-6ce3-4dac-a71f-09ad4249bdd4"]

        # This slight wonkiness brought to you by pep8, as the actual
        # example output runs about 97 chars wide.
        sample_input = """Loading iscsi modules: done
Starting iSCSI initiator service: done
Setting up iSCSI targets: unused
%s %s
%s %s
""" % (ips[0] + ',1', iqns[0], ips[1] + ',1', iqns[1])
        out = self.connector.\
            _get_target_portals_from_iscsiadm_output(sample_input)
        self.assertEqual((ips, iqns), out)

    def test_sanitize_log_run_iscsiadm(self):
        # Tests that the parameters to the _run_iscsiadm function
        # are sanitized for when passwords are logged.
        def fake_debug(*args, **kwargs):
            self.assertIn('node.session.auth.password', args[0])
            self.assertNotIn('scrubme', args[0])

        volume = {'id': 'fake_uuid'}
        connection_info = self.iscsi_connection(volume,
                                                "10.0.2.15:3260",
                                                "fake_iqn")

        iscsi_properties = connection_info['data']
        with mock.patch.object(iscsi.LOG, 'debug',
                               side_effect=fake_debug) as debug_mock:
            self.connector._iscsiadm_update(iscsi_properties,
                                            'node.session.auth.password',
                                            'scrubme')

            # we don't care what the log message is, we just want to make
            # sure our stub method is called which asserts the password
            # is scrubbed
            self.assertTrue(debug_mock.called)

    @mock.patch.object(iscsi.ISCSIConnector, 'get_volume_paths')
    def test_extend_volume_no_path(self, mock_volume_paths):
        mock_volume_paths.return_value = []
        volume = {'id': 'fake_uuid'}
        connection_info = self.iscsi_connection(volume,
                                                "10.0.2.15:3260",
                                                "fake_iqn")

        self.assertRaises(exception.VolumePathsNotFound,
                          self.connector.extend_volume,
                          connection_info['data'])

    @mock.patch.object(linuxscsi.LinuxSCSI, 'extend_volume')
    @mock.patch.object(iscsi.ISCSIConnector, 'get_volume_paths')
    def test_extend_volume(self, mock_volume_paths, mock_scsi_extend):
        fake_new_size = 1024
        mock_volume_paths.return_value = ['/dev/vdx']
        mock_scsi_extend.return_value = fake_new_size
        volume = {'id': 'fake_uuid'}
        connection_info = self.iscsi_connection(volume,
                                                "10.0.2.15:3260",
                                                "fake_iqn")
        new_size = self.connector.extend_volume(connection_info['data'])
        self.assertEqual(fake_new_size, new_size)

    @mock.patch.object(iscsi.LOG, 'info')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'extend_volume')
    @mock.patch.object(iscsi.ISCSIConnector, 'get_volume_paths')
    def test_extend_volume_mask_password(self, mock_volume_paths,
                                         mock_scsi_extend,
                                         mock_log_info):
        fake_new_size = 1024
        mock_volume_paths.return_value = ['/dev/vdx']
        mock_scsi_extend.return_value = fake_new_size
        volume = {'id': 'fake_uuid'}
        connection_info = self.iscsi_connection_chap(
            volume, "10.0.2.15:3260", "fake_iqn",
            'CHAP', 'fake_user', 'fake_password',
            'CHAP1', 'fake_user1', 'fake_password1')
        self.connector.extend_volume(connection_info['data'])
        self.assertEqual(2, mock_log_info.call_count)
        self.assertIn("'auth_password': '***'",
                      str(mock_log_info.call_args_list[0]))
        self.assertIn("'discovery_auth_password': '***'",
                      str(mock_log_info.call_args_list[0]))

    @mock.patch.object(iscsi.LOG, 'warning')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'extend_volume')
    @mock.patch.object(iscsi.ISCSIConnector, 'get_volume_paths')
    def test_extend_volume_mask_password_no_paths(self, mock_volume_paths,
                                                  mock_scsi_extend,
                                                  mock_log_warning):
        fake_new_size = 1024
        mock_volume_paths.return_value = []
        mock_scsi_extend.return_value = fake_new_size
        volume = {'id': 'fake_uuid'}
        connection_info = self.iscsi_connection_chap(
            volume, "10.0.2.15:3260", "fake_iqn",
            'CHAP', 'fake_user', 'fake_password',
            'CHAP1', 'fake_user1', 'fake_password1')
        self.assertRaises(exception.VolumePathsNotFound,
                          self.connector.extend_volume,
                          connection_info['data'])
        self.assertEqual(1, mock_log_warning.call_count)
        self.assertIn("'auth_password': '***'",
                      str(mock_log_warning.call_args_list[0]))
        self.assertIn("'discovery_auth_password': '***'",
                      str(mock_log_warning.call_args_list[0]))

    @mock.patch.object(os.path, 'isdir')
    def test_get_all_available_volumes_path_not_dir(self, mock_isdir):
        mock_isdir.return_value = False
        expected = []
        actual = self.connector.get_all_available_volumes()
        self.assertCountEqual(expected, actual)

    @mock.patch.object(iscsi.ISCSIConnector, '_get_device_path')
    def test_get_potential_paths_mpath(self, get_path_mock):
        self.connector.use_multipath = True
        res = self.connector._get_potential_volume_paths(self.CON_PROPS)
        get_path_mock.assert_called_once_with(self.CON_PROPS)
        self.assertEqual(get_path_mock.return_value, res)
        self.assertEqual([], self.cmds)

    @mock.patch.object(iscsi.ISCSIConnector, '_get_iscsi_sessions')
    @mock.patch.object(iscsi.ISCSIConnector, '_get_device_path')
    def test_get_potential_paths_single_path(self, get_path_mock,
                                             get_sessions_mock):
        get_path_mock.side_effect = [['path1'], ['path2'],
                                     ['path3', 'path4']]
        get_sessions_mock.return_value = [
            'ip1:port1', 'ip2:port2', 'ip3:port3']

        self.connector.use_multipath = False
        res = self.connector._get_potential_volume_paths(self.CON_PROPS)
        self.assertEqual({'path1', 'path2', 'path3', 'path4'}, set(res))
        get_sessions_mock.assert_called_once_with()

    @mock.patch.object(iscsi.ISCSIConnector, '_discover_iscsi_portals')
    def test_get_ips_iqns_luns_with_target_iqns(self, discover_mock):
        res = self.connector._get_ips_iqns_luns(self.CON_PROPS)
        expected = list(self.connector._get_all_targets(self.CON_PROPS))
        self.assertListEqual(expected, res)
        discover_mock.assert_not_called()

    @mock.patch.object(iscsi.ISCSIConnector, '_get_discoverydb_portals')
    @mock.patch.object(iscsi.ISCSIConnector, '_discover_iscsi_portals')
    def test_get_ips_iqns_luns_discoverydb(self, discover_mock,
                                           db_portals_mock):
        db_portals_mock.return_value = [('ip1:port1', 'tgt1', '1'),
                                        ('ip2:port2', 'tgt2', '2')]
        res = self.connector._get_ips_iqns_luns(self.SINGLE_CON_PROPS,
                                                discover=False)
        self.assertListEqual(db_portals_mock.return_value, res)
        db_portals_mock.assert_called_once_with(self.SINGLE_CON_PROPS)
        discover_mock.assert_not_called()

    @mock.patch.object(iscsi.ISCSIConnector, '_get_all_targets')
    @mock.patch.object(iscsi.ISCSIConnector, '_get_discoverydb_portals')
    @mock.patch.object(iscsi.ISCSIConnector, '_discover_iscsi_portals')
    def test_get_ips_iqns_luns_disconnect_single_path(self, discover_mock,
                                                      db_portals_mock,
                                                      get_targets_mock):
        db_portals_mock.side_effect = exception.TargetPortalsNotFound
        res = self.connector._get_ips_iqns_luns(self.SINGLE_CON_PROPS,
                                                discover=False,
                                                is_disconnect_call=True)
        db_portals_mock.assert_called_once_with(self.SINGLE_CON_PROPS)
        discover_mock.assert_not_called()
        get_targets_mock.assert_called_once_with(self.SINGLE_CON_PROPS)
        self.assertEqual(get_targets_mock.return_value, res)

    @mock.patch.object(iscsi.ISCSIConnector, '_discover_iscsi_portals')
    def test_get_ips_iqns_luns_no_target_iqns_share_iqn(self, discover_mock):
        discover_mock.return_value = [('ip1:port1', 'tgt1', '1'),
                                      ('ip1:port1', 'tgt2', '1'),
                                      ('ip2:port2', 'tgt1', '2'),
                                      ('ip2:port2', 'tgt2', '2')]
        res = self.connector._get_ips_iqns_luns(self.SINGLE_CON_PROPS)
        expected = {('ip1:port1', 'tgt1', '1'),
                    ('ip2:port2', 'tgt1', '2')}
        self.assertEqual(expected, set(res))

    @mock.patch.object(iscsi.ISCSIConnector, '_discover_iscsi_portals')
    def test_get_ips_iqns_luns_no_target_iqns_diff_iqn(self, discover_mock):
        discover_mock.return_value = [('ip1:port1', 'tgt1', '1'),
                                      ('ip2:port2', 'tgt2', '2')]
        res = self.connector._get_ips_iqns_luns(self.SINGLE_CON_PROPS)
        self.assertEqual(discover_mock.return_value, res)

    @mock.patch.object(iscsi.ISCSIConnector, '_get_iscsi_sessions_full')
    def test_connect_to_iscsi_portal_all_new(self, get_sessions_mock):
        """Connect creating node and session."""
        session = 'session2'
        get_sessions_mock.side_effect = [
            [('tcp:', 'session1', 'ip1:port1', '1', 'tgt')],
            [('tcp:', 'session1', 'ip1:port1', '1', 'tgt'),
             ('tcp:', session, 'ip1:port1', '-1', 'tgt1')]
        ]
        utils.ISCSI_SUPPORTS_MANUAL_SCAN = None
        with mock.patch.object(self.connector, '_execute') as exec_mock:
            exec_mock.side_effect = [('', 'error'), ('', None),
                                     ('', None), ('', None),
                                     ('', None)]
            res = self.connector._connect_to_iscsi_portal(self.CON_PROPS)

        # True refers to "manual scans", since the call to update
        # node.session.scan didn't fail they are set to manual
        self.assertEqual((session, True), res)
        self.assertTrue(utils.ISCSI_SUPPORTS_MANUAL_SCAN)
        prefix = 'iscsiadm -m node -T tgt1 -p ip1:port1'
        expected_cmds = [
            prefix,
            prefix + ' --interface default --op new',
            prefix + ' --op update -n node.session.scan -v manual',
            prefix + ' --login',
            prefix + ' --op update -n node.startup -v automatic'
        ]
        actual_cmds = [' '.join(args[0]) for args in
                       exec_mock.call_args_list]
        self.assertListEqual(expected_cmds, actual_cmds)
        self.assertEqual(2, get_sessions_mock.call_count)

    @mock.patch.object(iscsi.ISCSIConnector, '_get_iscsi_sessions_full')
    def test_connect_to_iscsi_portal_ip_case_insensitive(self,
                                                         get_sessions_mock):
        """Connect creating node and session."""
        session = 'session2'
        get_sessions_mock.side_effect = [
            [('tcp:', 'session1', 'iP1:port1', '1', 'tgt')],
            [('tcp:', 'session1', 'Ip1:port1', '1', 'tgt'),
             ('tcp:', session, 'IP1:port1', '-1', 'tgt1')]
        ]
        utils.ISCSI_SUPPORTS_MANUAL_SCAN = None
        with mock.patch.object(self.connector, '_execute') as exec_mock:
            exec_mock.side_effect = [('', 'error'), ('', None),
                                     ('', None), ('', None),
                                     ('', None)]
            res = self.connector._connect_to_iscsi_portal(self.CON_PROPS)

        # True refers to "manual scans", since the call to update
        # node.session.scan didn't fail they are set to manual
        self.assertEqual((session, True), res)
        self.assertTrue(utils.ISCSI_SUPPORTS_MANUAL_SCAN)
        prefix = 'iscsiadm -m node -T tgt1 -p ip1:port1'
        expected_cmds = [
            prefix,
            prefix + ' --interface default --op new',
            prefix + ' --op update -n node.session.scan -v manual',
            prefix + ' --login',
            prefix + ' --op update -n node.startup -v automatic'
        ]
        actual_cmds = [' '.join(args[0]) for args in
                       exec_mock.call_args_list]
        self.assertListEqual(expected_cmds, actual_cmds)
        self.assertEqual(2, get_sessions_mock.call_count)

    @mock.patch.object(iscsi.ISCSIConnector, '_get_iscsi_sessions_full')
    def test_connect_to_iscsi_portal_all_exists_chap(self,
                                                     get_sessions_mock):
        """Node and session already exist and we use chap authentication."""
        session = 'session2'
        get_sessions_mock.return_value = [('tcp:', session, 'ip1:port1',
                                           '-1', 'tgt1')]
        con_props = self.CON_PROPS.copy()
        con_props.update(auth_method='CHAP', auth_username='user',
                         auth_password='pwd')
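        # The session is already established, so no --op new/--login is
        # expected below; only the CHAP credentials are pushed to the node
        # database via --op update.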
        utils.ISCSI_SUPPORTS_MANUAL_SCAN = None
        res = self.connector._connect_to_iscsi_portal(con_props)
        # True refers to "manual scans", so we have manual iscsi scans
        self.assertEqual((session, True), res)
        self.assertTrue(utils.ISCSI_SUPPORTS_MANUAL_SCAN)
        prefix = 'iscsiadm -m node -T tgt1 -p ip1:port1'
        expected_cmds = [
            prefix,
            prefix + ' --op update -n node.session.scan -v manual',
            prefix + ' --op update -n node.session.auth.authmethod -v CHAP',
            prefix + ' --op update -n node.session.auth.username -v user',
            prefix + ' --op update -n node.session.auth.password -v pwd',
        ]
        self.assertListEqual(expected_cmds, self.cmds)
        get_sessions_mock.assert_called_once_with()

    @mock.patch.object(iscsi.ISCSIConnector, '_get_iscsi_sessions_full')
    def test_connect_to_iscsi_portal_fail_login(self, get_sessions_mock):
        get_sessions_mock.return_value = []
        with mock.patch.object(self.connector, '_execute') as exec_mock:
            exec_mock.side_effect = [('', None), ('', None),
                                     putils.ProcessExecutionError]
            res = self.connector._connect_to_iscsi_portal(self.CON_PROPS)
        self.assertEqual((None, None), res)
        expected_cmds = ['iscsiadm -m node -T tgt1 -p ip1:port1',
                         'iscsiadm -m node -T tgt1 -p ip1:port1 '
                         '--op update -n node.session.scan -v manual',
                         'iscsiadm -m node -T tgt1 -p ip1:port1 --login']
        actual_cmds = [' '.join(args[0]) for args in
                       exec_mock.call_args_list]
        self.assertListEqual(expected_cmds, actual_cmds)
        get_sessions_mock.assert_called_once_with()

    @mock.patch.object(iscsi.ISCSIConnector, '_iscsiadm_update')
    @mock.patch.object(iscsi.ISCSIConnector, '_get_transport',
                       return_value='default')
    @mock.patch.object(iscsi.ISCSIConnector, '_get_iscsi_sessions_full')
    @mock.patch('os_brick.utils._time_sleep')
    def test_connect_to_iscsi_portal_fail_op_new(self, sleep_mock,
                                                 get_sessions_mock,
                                                 get_transport_mock,
                                                 iscsiadm_update_mock):
        get_sessions_mock.return_value = []
        with mock.patch.object(self.connector, '_execute') as exec_mock:
            exec_mock.side_effect = [('', 21), ('', 6),
                                     ('', 21), ('', 6),
                                     ('', 21), ('', 6)]
            self.assertRaises(exception.BrickException,
                              self.connector._connect_to_iscsi_portal,
                              self.CON_PROPS)
        expected_cmds = ['iscsiadm -m node -T tgt1 -p ip1:port1',
                         'iscsiadm -m node -T tgt1 -p ip1:port1 '
                         '--interface default --op new',
                         'iscsiadm -m node -T tgt1 -p ip1:port1',
                         'iscsiadm -m node -T tgt1 -p ip1:port1 '
                         '--interface default --op new',
                         'iscsiadm -m node -T tgt1 -p ip1:port1',
                         'iscsiadm -m node -T tgt1 -p ip1:port1 '
                         '--interface default --op new']
        actual_cmds = [' '.join(args[0]) for args in
                       exec_mock.call_args_list]
        self.assertListEqual(expected_cmds, actual_cmds)
        iscsiadm_update_mock.assert_not_called()
        # Called twice by the retry mechanism
        self.assertEqual(2, sleep_mock.call_count)

    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_sysfs_wwn',
                       side_effect=(None, 'tgt2'))
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_vol')
    @mock.patch.object(iscsi.ISCSIConnector, '_cleanup_connection')
    @mock.patch('os_brick.utils._time_sleep')
    def test_connect_single_volume(self, sleep_mock, cleanup_mock,
                                   connect_mock, get_wwn_mock):
        def my_connect(rescans, props, data):
            if props['target_iqn'] == 'tgt2':
                # Succeed on second call
                data['found_devices'].append('sdz')

        connect_mock.side_effect = my_connect

        res = self.connector._connect_single_volume(self.CON_PROPS)

        expected = {'type': 'block', 'scsi_wwn': 'tgt2',
                    'path': '/dev/sdz'}
        self.assertEqual(expected, res)
        get_wwn_mock.assert_has_calls([mock.call(['sdz']),
                                       mock.call(['sdz'])])
        sleep_mock.assert_called_once_with(1)
        cleanup_mock.assert_called_once_with(
            {'target_lun': 4, 'volume_id': 'vol_id',
             'target_portal': 'ip1:port1', 'target_iqn': 'tgt1'},
            [('ip1:port1', 'tgt1', 4), ],
            force=True, ignore_errors=True)

    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_sysfs_wwn',
                       return_value='')
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_vol')
    @mock.patch.object(iscsi.ISCSIConnector, '_cleanup_connection')
    @mock.patch('os_brick.utils._time_sleep')
    def test_connect_single_volume_no_wwn(self, sleep_mock, cleanup_mock,
                                          connect_mock, get_wwn_mock):
        def my_connect(rescans, props, data):
            data['found_devices'].append('sdz')

        connect_mock.side_effect = my_connect

        res = self.connector._connect_single_volume(self.CON_PROPS)

        expected = {'type': 'block', 'scsi_wwn': '', 'path': '/dev/sdz'}
        self.assertEqual(expected, res)
        get_wwn_mock.assert_has_calls([mock.call(['sdz'])] * 10)
        self.assertEqual(10, get_wwn_mock.call_count)
        sleep_mock.assert_has_calls([mock.call(1)] * 10)
        self.assertEqual(10, sleep_mock.call_count)
        cleanup_mock.assert_not_called()

    @staticmethod
    def _get_connect_vol_data():
        return {'stop_connecting': False, 'num_logins': 0,
                'failed_logins': 0, 'stopped_threads': 0,
                'found_devices': [], 'just_added_devices': []}

    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_sysfs_wwn',
                       side_effect=(None, 'tgt2'))
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_vol')
    @mock.patch.object(iscsi.ISCSIConnector, '_cleanup_connection')
    @mock.patch('os_brick.utils._time_sleep')
    def test_connect_single_volume_not_found(self, sleep_mock, cleanup_mock,
                                             connect_mock, get_wwn_mock):
        self.assertRaises(exception.VolumeDeviceNotFound,
                          self.connector._connect_single_volume,
                          self.CON_PROPS)

        get_wwn_mock.assert_not_called()

        # Called twice by the retry mechanism
        self.assertEqual(2, sleep_mock.call_count)

        props = list(self.connector._get_all_targets(self.CON_PROPS))
        calls_per_try = [
            mock.call({'target_portal': prop[0], 'target_iqn': prop[1],
                       'target_lun': prop[2], 'volume_id': 'vol_id'},
                      [prop, ], force=True, ignore_errors=True)
            for prop in props
        ]
        cleanup_mock.assert_has_calls(calls_per_try * 3)

        data = self._get_connect_vol_data()
        calls_per_try = [mock.call(self.connector.device_scan_attempts,
                                   {'target_portal': prop[0],
                                    'target_iqn': prop[1],
                                    'target_lun': prop[2],
                                    'volume_id': 'vol_id'},
                                   data)
                         for prop in props]
        connect_mock.assert_has_calls(calls_per_try * 3)

    @mock.patch.object(linuxscsi.LinuxSCSI, 'find_sysfs_multipath_dm',
                       side_effect=[None, 'dm-0'])
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_sysfs_wwn',
                       return_value='wwn')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_add_path')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_add_wwid')
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_vol')
    @mock.patch('os_brick.utils._time_sleep')
    def test_connect_multipath_volume_all_succeed(self, sleep_mock,
                                                  connect_mock,
                                                  add_wwid_mock,
                                                  add_path_mock,
                                                  get_wwn_mock,
                                                  find_dm_mock):
        def my_connect(rescans, props, data):
            devs = {'tgt1': 'sda', 'tgt2': 'sdb',
                    'tgt3': 'sdc', 'tgt4': 'sdd'}
            data['stopped_threads'] += 1
            data['num_logins'] += 1
            dev = devs[props['target_iqn']]
            data['found_devices'].append(dev)
            data['just_added_devices'].append(dev)

        connect_mock.side_effect = my_connect

        res = self.connector._connect_multipath_volume(self.CON_PROPS)

        expected = {'type': 'block', 'scsi_wwn': 'wwn',
                    'multipath_id': 'wwn', 'path': '/dev/dm-0'}
        self.assertEqual(expected, res)
        self.assertEqual(1, get_wwn_mock.call_count)
        result = list(get_wwn_mock.call_args[0][0])
        result.sort()
        self.assertEqual(['sda', 'sdb', 'sdc', 'sdd'], result)
        # Check we pass the mpath
        self.assertIsNone(get_wwn_mock.call_args[0][1])
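        # The wwn was resolved before the dm device showed up, so
        # get_sysfs_wwn never received an mpath argument; the wwid is
        # registered with multipathd by hand instead.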
        add_wwid_mock.assert_called_once_with('wwn')
        self.assertNotEqual(0, add_path_mock.call_count)
        self.assertGreaterEqual(find_dm_mock.call_count, 2)
        self.assertEqual(4, connect_mock.call_count)

    @mock.patch.object(linuxscsi.LinuxSCSI, 'find_sysfs_multipath_dm',
                       side_effect=[None, 'dm-0'])
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_sysfs_wwn',
                       return_value='')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_add_path')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_add_wwid')
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_vol')
    @mock.patch('os_brick.utils._time_sleep')
    def test_connect_multipath_volume_no_wwid(self, sleep_mock, connect_mock,
                                              add_wwid_mock, add_path_mock,
                                              get_wwn_mock, find_dm_mock):
        # Even if we don't have the wwn we'll be able to find the multipath
        def my_connect(rescans, props, data):
            devs = {'tgt1': 'sda', 'tgt2': 'sdb',
                    'tgt3': 'sdc', 'tgt4': 'sdd'}
            data['stopped_threads'] += 1
            data['num_logins'] += 1
            dev = devs[props['target_iqn']]
            data['found_devices'].append(dev)
            data['just_added_devices'].append(dev)

        connect_mock.side_effect = my_connect

        with mock.patch.object(self.connector, 'use_multipath'):
            res = self.connector._connect_multipath_volume(self.CON_PROPS)

        expected = {'type': 'block', 'scsi_wwn': '',
                    'multipath_id': '', 'path': '/dev/dm-0'}
        self.assertEqual(expected, res)
        self.assertEqual(3, get_wwn_mock.call_count)
        result = list(get_wwn_mock.call_args[0][0])
        result.sort()
        self.assertEqual(['sda', 'sdb', 'sdc', 'sdd'], result)
        # Initially mpath we pass is None, but on last call is the mpath
        mpath_values = [c[1][1] for c in get_wwn_mock._mock_mock_calls]
        self.assertEqual([None, None, 'dm-0'], mpath_values)
        add_wwid_mock.assert_not_called()
        add_path_mock.assert_not_called()
        self.assertGreaterEqual(find_dm_mock.call_count, 2)
        self.assertEqual(4, connect_mock.call_count)

    @mock.patch.object(linuxscsi.LinuxSCSI, 'find_sysfs_multipath_dm',
                       side_effect=[None, 'dm-0'])
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_sysfs_wwn',
                       return_value='wwn')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_add_path')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_add_wwid')
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_vol')
    @mock.patch('os_brick.utils._time_sleep')
    def test_connect_multipath_volume_all_fail(self, sleep_mock, connect_mock,
                                               add_wwid_mock, add_path_mock,
                                               get_wwn_mock, find_dm_mock):
        def my_connect(rescans, props, data):
            data['stopped_threads'] += 1
            data['failed_logins'] += 1

        connect_mock.side_effect = my_connect

        self.assertRaises(exception.VolumeDeviceNotFound,
                          self.connector._connect_multipath_volume,
                          self.CON_PROPS)

        get_wwn_mock.assert_not_called()
        add_wwid_mock.assert_not_called()
        add_path_mock.assert_not_called()
        find_dm_mock.assert_not_called()
        self.assertEqual(4 * 3, connect_mock.call_count)

    @mock.patch.object(linuxscsi.LinuxSCSI, 'find_sysfs_multipath_dm',
                       side_effect=[None, 'dm-0'])
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_sysfs_wwn',
                       return_value='wwn')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_add_path')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_add_wwid')
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_vol')
    @mock.patch('os_brick.utils._time_sleep')
    def test_connect_multipath_volume_some_fail_mp_found(self, sleep_mock,
                                                         connect_mock,
                                                         add_wwid_mock,
                                                         add_path_mock,
                                                         get_wwn_mock,
                                                         find_dm_mock):
        def my_connect(rescans, props, data):
            devs = {'tgt1': '', 'tgt2': 'sdb', 'tgt3': '', 'tgt4': 'sdd'}
            data['stopped_threads'] += 1
            dev = devs[props['target_iqn']]
            if dev:
                data['num_logins'] += 1
                data['found_devices'].append(dev)
                data['just_added_devices'].append(dev)
            else:
                data['failed_logins'] += 1

        connect_mock.side_effect = my_connect

        res = self.connector._connect_multipath_volume(self.CON_PROPS)

        expected = {'type': 'block', 'scsi_wwn': 'wwn',
                    'multipath_id': 'wwn', 'path': '/dev/dm-0'}
        self.assertEqual(expected, res)
        self.assertEqual(1, get_wwn_mock.call_count)
        result = list(get_wwn_mock.call_args[0][0])
        result.sort()
        self.assertEqual(['sdb', 'sdd'], result)
        add_wwid_mock.assert_called_once_with('wwn')
        self.assertNotEqual(0, add_path_mock.call_count)
        self.assertGreaterEqual(find_dm_mock.call_count, 2)
        self.assertEqual(4, connect_mock.call_count)

    @mock.patch.object(linuxscsi.LinuxSCSI, 'find_sysfs_multipath_dm',
                       return_value=None)
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_sysfs_wwn',
                       return_value='wwn')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_add_path')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_add_wwid')
    @mock.patch.object(iscsi.time, 'time', side_effect=(0, 0, 11, 0))
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_vol')
    @mock.patch('os_brick.utils._time_sleep')
    def test_connect_multipath_volume_some_fail_mp_not_found(self,
                                                             sleep_mock,
                                                             connect_mock,
                                                             time_mock,
                                                             add_wwid_mock,
                                                             add_path_mock,
                                                             get_wwn_mock,
                                                             find_dm_mock):
        def my_connect(rescans, props, data):
            devs = {'tgt1': '', 'tgt2': 'sdb', 'tgt3': '', 'tgt4': 'sdd'}
            data['stopped_threads'] += 1
            dev = devs[props['target_iqn']]
            if dev:
                data['num_logins'] += 1
                data['found_devices'].append(dev)
                data['just_added_devices'].append(dev)
            else:
                data['failed_logins'] += 1

        connect_mock.side_effect = my_connect

        res = self.connector._connect_multipath_volume(self.CON_PROPS)

        expected = [{'type': 'block', 'scsi_wwn': 'wwn', 'path': '/dev/sdb'},
                    {'type': 'block', 'scsi_wwn': 'wwn', 'path': '/dev/sdd'}]
        # It can only be one of the 2
        self.assertIn(res, expected)
        self.assertEqual(1, get_wwn_mock.call_count)
        result = list(get_wwn_mock.call_args[0][0])
        result.sort()
        self.assertEqual(['sdb', 'sdd'], result)
        add_wwid_mock.assert_called_once_with('wwn')
        self.assertNotEqual(0, add_path_mock.call_count)
        self.assertGreaterEqual(find_dm_mock.call_count, 4)
        self.assertEqual(4, connect_mock.call_count)

    @mock.patch.object(linuxscsi.LinuxSCSI, 'find_sysfs_multipath_dm',
                       return_value=None)
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_sysfs_wwn',
                       return_value='wwn')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_add_path')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_add_wwid')
    @mock.patch.object(iscsi.time, 'time', side_effect=(0, 0, 11, 0))
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_vol')
    @mock.patch('os_brick.utils._time_sleep', mock.Mock())
    def test_connect_multipath_volume_all_loging_not_found(self,
                                                           connect_mock,
                                                           time_mock,
                                                           add_wwid_mock,
                                                           add_path_mock,
                                                           get_wwn_mock,
                                                           find_dm_mock):
        def my_connect(rescans, props, data):
            data['stopped_threads'] += 1
            data['num_logins'] += 1

        connect_mock.side_effect = my_connect

        self.assertRaises(exception.VolumeDeviceNotFound,
                          self.connector._connect_multipath_volume,
                          self.CON_PROPS)

        get_wwn_mock.assert_not_called()
        add_wwid_mock.assert_not_called()
        add_path_mock.assert_not_called()
        find_dm_mock.assert_not_called()
        self.assertEqual(12, connect_mock.call_count)

    @mock.patch('os_brick.utils._time_sleep')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'scan_iscsi')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'device_name_by_hctl',
                       return_value='sda')
    @mock.patch.object(iscsi.ISCSIConnector, '_connect_to_iscsi_portal')
    def test_connect_vol(self, connect_mock, dev_name_mock, scan_mock,
                         sleep_mock):
        lscsi = self.connector._linuxscsi
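        # _connect_vol communicates with its caller through the shared
        # `data` dict from _get_connect_vol_data(): per-thread login and
        # stop counters plus the lists of block devices found.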
data = self._get_connect_vol_data() hctl = [mock.sentinel.host, mock.sentinel.channel, mock.sentinel.target, mock.sentinel.lun] connect_mock.return_value = (mock.sentinel.session, False) with mock.patch.object(lscsi, 'get_hctl', side_effect=(None, hctl)) as hctl_mock: self.connector._connect_vol(3, self.CON_PROPS, data) expected = self._get_connect_vol_data() expected.update(num_logins=1, stopped_threads=1, found_devices=['sda'], just_added_devices=['sda']) self.assertDictEqual(expected, data) connect_mock.assert_called_once_with(self.CON_PROPS) hctl_mock.assert_has_calls([mock.call(mock.sentinel.session, self.CON_PROPS['target_lun']), mock.call(mock.sentinel.session, self.CON_PROPS['target_lun'])]) scan_mock.assert_not_called() dev_name_mock.assert_called_once_with(mock.sentinel.session, hctl) sleep_mock.assert_called_once_with(1) @mock.patch('os_brick.utils._time_sleep') @mock.patch.object(linuxscsi.LinuxSCSI, 'scan_iscsi') @mock.patch.object(linuxscsi.LinuxSCSI, 'device_name_by_hctl', side_effect=(None, None, None, None, 'sda')) @mock.patch.object(iscsi.ISCSIConnector, '_connect_to_iscsi_portal') def test_connect_vol_rescan(self, connect_mock, dev_name_mock, scan_mock, sleep_mock): lscsi = self.connector._linuxscsi data = self._get_connect_vol_data() hctl = [mock.sentinel.host, mock.sentinel.channel, mock.sentinel.target, mock.sentinel.lun] connect_mock.return_value = (mock.sentinel.session, False) with mock.patch.object(lscsi, 'get_hctl', return_value=hctl) as hctl_mock: self.connector._connect_vol(3, self.CON_PROPS, data) expected = self._get_connect_vol_data() expected.update(num_logins=1, stopped_threads=1, found_devices=['sda'], just_added_devices=['sda']) self.assertDictEqual(expected, data) connect_mock.assert_called_once_with(self.CON_PROPS) hctl_mock.assert_called_once_with(mock.sentinel.session, self.CON_PROPS['target_lun']) scan_mock.assert_called_once_with(*hctl) self.assertEqual(5, dev_name_mock.call_count) self.assertEqual(4, sleep_mock.call_count) @mock.patch('os_brick.utils._time_sleep') @mock.patch.object(linuxscsi.LinuxSCSI, 'scan_iscsi') @mock.patch.object(linuxscsi.LinuxSCSI, 'device_name_by_hctl', side_effect=(None, None, None, None, 'sda')) @mock.patch.object(iscsi.ISCSIConnector, '_connect_to_iscsi_portal') def test_connect_vol_manual(self, connect_mock, dev_name_mock, scan_mock, sleep_mock): lscsi = self.connector._linuxscsi data = self._get_connect_vol_data() hctl = [mock.sentinel.host, mock.sentinel.channel, mock.sentinel.target, mock.sentinel.lun] # Simulate manual scan connect_mock.return_value = (mock.sentinel.session, True) with mock.patch.object(lscsi, 'get_hctl', return_value=hctl) as hctl_mock: self.connector._connect_vol(3, self.CON_PROPS, data) expected = self._get_connect_vol_data() expected.update(num_logins=1, stopped_threads=1, found_devices=['sda'], just_added_devices=['sda']) self.assertDictEqual(expected, data) connect_mock.assert_called_once_with(self.CON_PROPS) hctl_mock.assert_called_once_with(mock.sentinel.session, self.CON_PROPS['target_lun']) self.assertEqual(2, scan_mock.call_count) self.assertEqual(5, dev_name_mock.call_count) self.assertEqual(4, sleep_mock.call_count) @mock.patch.object(iscsi.ISCSIConnector, '_connect_to_iscsi_portal', return_value=(None, False)) def test_connect_vol_no_session(self, connect_mock): data = self._get_connect_vol_data() self.connector._connect_vol(3, self.CON_PROPS, data) expected = self._get_connect_vol_data() expected.update(failed_logins=1, stopped_threads=1) self.assertDictEqual(expected, data) 
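    # NOTE: a minimal illustrative sketch, not an original test. Stacked
    # ``mock.patch`` decorators are applied bottom-up, which is why every
    # test in this class declares its mock parameters in reverse decorator
    # order: the innermost patch becomes the first argument after ``self``.
    # The patched targets below are arbitrary examples.
    @mock.patch('os.rename')   # outermost -> last mock argument
    @mock.patch('os.remove')   # innermost -> first mock argument
    def _demo_patch_ordering(self, remove_mock, rename_mock):
        assert remove_mock is not rename_mock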
@mock.patch.object(iscsi.ISCSIConnector, '_connect_to_iscsi_portal') def test_connect_vol_with_connection_failure(self, connect_mock): data = self._get_connect_vol_data() connect_mock.side_effect = Exception() self.connector._connect_vol(3, self.CON_PROPS, data) expected = self._get_connect_vol_data() expected.update(failed_logins=1, stopped_threads=1) self.assertDictEqual(expected, data) @mock.patch('os_brick.utils._time_sleep', mock.Mock()) @mock.patch.object(linuxscsi.LinuxSCSI, 'scan_iscsi') @mock.patch.object(linuxscsi.LinuxSCSI, 'device_name_by_hctl', return_value=None) @mock.patch.object(iscsi.ISCSIConnector, '_connect_to_iscsi_portal') def test_connect_vol_not_found(self, connect_mock, dev_name_mock, scan_mock): lscsi = self.connector._linuxscsi data = self._get_connect_vol_data() hctl = [mock.sentinel.host, mock.sentinel.channel, mock.sentinel.target, mock.sentinel.lun] # True because we are simulating we have manual scans connect_mock.return_value = (mock.sentinel.session, True) with mock.patch.object(lscsi, 'get_hctl', side_effect=(hctl,)) as hctl_mock: self.connector._connect_vol(3, self.CON_PROPS, data) expected = self._get_connect_vol_data() expected.update(num_logins=1, stopped_threads=1) self.assertDictEqual(expected, data) hctl_mock.assert_called_once_with(mock.sentinel.session, self.CON_PROPS['target_lun']) # We have 3 scans because on manual mode we also scan on connect scan_mock.assert_has_calls([mock.call(*hctl)] * 3) dev_name_mock.assert_has_calls( [mock.call(mock.sentinel.session, hctl), mock.call(mock.sentinel.session, hctl)]) @mock.patch('os_brick.utils._time_sleep', mock.Mock()) @mock.patch.object(linuxscsi.LinuxSCSI, 'scan_iscsi') @mock.patch.object(iscsi.ISCSIConnector, '_connect_to_iscsi_portal') def test_connect_vol_stop_connecting(self, connect_mock, scan_mock): data = self._get_connect_vol_data() def device_name_by_hctl(session, hctl): data['stop_connecting'] = True return None lscsi = self.connector._linuxscsi hctl = [mock.sentinel.host, mock.sentinel.channel, mock.sentinel.target, mock.sentinel.lun] connect_mock.return_value = (mock.sentinel.session, False) with mock.patch.object(lscsi, 'get_hctl', return_value=hctl) as hctl_mock, \ mock.patch.object( lscsi, 'device_name_by_hctl', side_effect=device_name_by_hctl) as dev_name_mock: self.connector._connect_vol(3, self.CON_PROPS, data) expected = self._get_connect_vol_data() expected.update(num_logins=1, stopped_threads=1, stop_connecting=True) self.assertDictEqual(expected, data) hctl_mock.assert_called_once_with(mock.sentinel.session, self.CON_PROPS['target_lun']) scan_mock.assert_not_called() dev_name_mock.assert_called_once_with(mock.sentinel.session, hctl) def test__get_connect_result(self): props = self.CON_PROPS.copy() props['encrypted'] = False res = self.connector._get_connect_result(props, 'wwn', ['sda', 'sdb']) expected = {'type': 'block', 'scsi_wwn': 'wwn', 'path': '/dev/sda'} self.assertDictEqual(expected, res) def test__get_connect_result_mpath(self): props = self.CON_PROPS.copy() props['encrypted'] = False res = self.connector._get_connect_result(props, 'wwn', ['sda', 'sdb'], 'mpath') expected = {'type': 'block', 'scsi_wwn': 'wwn', 'path': '/dev/mpath', 'multipath_id': 'wwn'} self.assertDictEqual(expected, res) @mock.patch.object(iscsi.ISCSIConnector, '_run_iscsiadm_bare') def test_get_node_startup_values(self, run_iscsiadm_bare_mock): name1 = 'volume-00000001-1' name2 = 'volume-00000001-2' name3 = 'volume-00000001-3' vol = {'id': 1, 'name': name1} location = '10.0.2.15:3260' iqn1 = 
'iqn.2010-10.org.openstack:%s' % name1 iqn2 = 'iqn.2010-10.org.openstack:%s' % name2 iqn3 = 'iqn.2010-10.org.openstack:%s' % name3 connection_properties = self.iscsi_connection(vol, [location], [iqn1]) node_startup1 = "manual" node_startup2 = "automatic" node_startup3 = "manual" node_values = ( '# BEGIN RECORD 2.0-873\n' 'node.name = %s\n' 'node.tpgt = 1\n' 'node.startup = %s\n' 'iface.hwaddress = \n' '# END RECORD\n' '# BEGIN RECORD 2.0-873\n' 'node.name = %s\n' 'node.tpgt = 1\n' 'node.startup = %s\n' 'iface.hwaddress = \n' '# END RECORD\n' '# BEGIN RECORD 2.0-873\n' 'node.name = %s\n' 'node.tpgt = 1\n' 'node.startup = %s\n' 'iface.hwaddress = \n' '# END RECORD\n') % (iqn1, node_startup1, iqn2, node_startup2, iqn3, node_startup3) run_iscsiadm_bare_mock.return_value = (node_values, None) node_startups =\ self.connector._get_node_startup_values( connection_properties['data']) expected_node_startups = {iqn1: node_startup1, iqn2: node_startup2, iqn3: node_startup3} self.assertEqual(node_startups, expected_node_startups) @mock.patch.object(iscsi.ISCSIConnector, '_execute') def test_get_node_startup_values_no_nodes(self, exec_mock): connection_properties = {'target_portal': 'ip1:port1'} no_nodes_output = '' no_nodes_err = 'iscsiadm: No records found\n' exec_mock.return_value = (no_nodes_output, no_nodes_err) res = self.connector._get_node_startup_values(connection_properties) self.assertEqual({}, res) exec_mock.assert_called_once_with( 'iscsiadm', '-m', 'node', '--op', 'show', '-p', connection_properties['target_portal'], root_helper=self.connector._root_helper, run_as_root=True, check_exit_code=(0, 21)) @mock.patch.object(iscsi.ISCSIConnector, '_get_node_startup_values') @mock.patch.object(iscsi.ISCSIConnector, '_iscsiadm_update') def test_recover_node_startup_values(self, iscsiadm_update_mock, get_node_startup_values_mock): name1 = 'volume-00000001-1' name2 = 'volume-00000001-2' name3 = 'volume-00000001-3' vol = {'id': 1, 'name': name1} location = '10.0.2.15:3260' iqn1 = 'iqn.2010-10.org.openstack:%s' % name1 iqn2 = 'iqn.2010-10.org.openstack:%s' % name2 iqn3 = 'iqn.2010-10.org.openstack:%s' % name3 connection_properties = self.iscsi_connection(vol, [location], iqn1) recover_connection = self.iscsi_connection(vol, [location], iqn2) node_startup1 = "manual" node_startup2 = "automatic" node_startup3 = "manual" get_node_startup_values_mock.return_value = {iqn1: node_startup1, iqn2: node_startup2, iqn3: node_startup3} old_node_startup_values = {iqn1: node_startup1, iqn2: "manual", iqn3: node_startup3} self.connector._recover_node_startup_values( connection_properties['data'], old_node_startup_values) iscsiadm_update_mock.assert_called_once_with( recover_connection['data'], "node.startup", "manual") @ddt.data(None, 'SAM2') @mock.patch.object(linuxscsi.LinuxSCSI, 'lun_for_addressing') @mock.patch.object(iscsi.base_iscsi.BaseISCSIConnector, '_get_all_targets') def test__get_all_targets_no_addressing_mode(self, addressing_mode, get_mock, luns_mock): get_mock.return_value = [ (mock.sentinel.portal1, mock.sentinel.iqn1, mock.sentinel.lun1), (mock.sentinel.portal2, mock.sentinel.iqn2, mock.sentinel.lun2) ] luns_mock.side_effect = [mock.sentinel.lun1B, mock.sentinel.lun2B] conn_props = self.CON_PROPS.copy() if addressing_mode: conn_props['addressing_mode'] = addressing_mode res = self.connector._get_all_targets(conn_props) self.assertEqual(2, luns_mock.call_count) luns_mock.assert_has_calls( [mock.call(mock.sentinel.lun1, addressing_mode), mock.call(mock.sentinel.lun2, addressing_mode)]) 
        get_mock.assert_called_once_with(conn_props)
        expected = [
            (mock.sentinel.portal1, mock.sentinel.iqn1, mock.sentinel.lun1B),
            (mock.sentinel.portal2, mock.sentinel.iqn2, mock.sentinel.lun2B)
        ]
        self.assertListEqual(expected, res)

    @mock.patch.object(linuxscsi.LinuxSCSI, 'is_multipath_running')
    def test_supports_multipath(self, mock_mpath_running):
        self.connector.supports_multipath()
        mock_mpath_running.assert_called_once_with(
            root_helper=self.connector._root_helper)


# ===== os_brick-6.11.0/os_brick/tests/initiator/connectors/test_iser.py =====

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from os_brick.initiator.connectors import iscsi
from os_brick.tests.initiator import test_connector


class ISERConnectorTestCase(test_connector.ConnectorTestCase):

    def setUp(self):
        super(ISERConnectorTestCase, self).setUp()
        self.connector = iscsi.ISCSIConnector(
            None, execute=self.fake_execute, use_multipath=False)
        self.connection_data = {
            'volume_id': 'volume_id',
            'target_portal': 'ip:port',
            'target_iqn': 'target_1',
            'target_lun': 1,
            'target_portals': ['ip:port'],
            'target_iqns': ['target_1'],
            'target_luns': [1]
        }

    @mock.patch.object(iscsi.ISCSIConnector, '_get_ips_iqns_luns')
    @mock.patch('glob.glob')
    @mock.patch.object(iscsi.ISCSIConnector, '_get_iscsi_sessions_full')
    @mock.patch.object(iscsi.ISCSIConnector, '_get_iscsi_nodes')
    def test_get_connection_devices(self, nodes_mock, sessions_mock,
                                    glob_mock, iql_mock):
        self.connector.use_multipath = True
        iql_mock.return_value = \
            self.connector._get_all_targets(self.connection_data)

        # mocked iSCSI sessions
        sessions_mock.return_value = \
            [('iser:', '0', 'ip:port', '1', 'target_1')]

        # mocked iSCSI nodes
        nodes_mock.return_value = [('ip:port', 'target_1')]
        sys_cls = '/sys/class/scsi_host/host'
        glob_mock.side_effect = [
            [sys_cls + '1/device/session/target/1:1:1:1/block/sda']
        ]

        res = self.connector._get_connection_devices(self.connection_data)
        expected = {('ip:port', 'target_1'): ({'sda'}, set())}
        self.assertDictEqual(expected, res)
        iql_mock.assert_called_once_with(self.connection_data,
                                         discover=False,
                                         is_disconnect_call=False)

    @mock.patch.object(iscsi.ISCSIConnector, '_get_iscsi_sessions_full')
    @mock.patch.object(iscsi.ISCSIConnector, '_execute')
    def test_connect_to_iscsi_portal(self, exec_mock, sessions_mock):
        """Connect to portal while session already established."""
        # connected sessions
        sessions_mock.side_effect = [
            [('iser:', 'session_iser', 'ip:port', '1', 'target_1')]
        ]
        exec_mock.side_effect = [('', None), ('', None), ('', None)]
        res = self.connector._connect_to_iscsi_portal(self.connection_data)

        # session name is expected to be in the result.
self.assertEqual(("session_iser", True), res) prefix = 'iscsiadm -m node -T target_1 -p ip:port' expected_cmds = [ prefix, prefix + ' --op update -n node.session.scan -v manual' ] actual_cmds = [' '.join(args[0]) for args in exec_mock.call_args_list] self.assertListEqual(expected_cmds, actual_cmds) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/initiator/connectors/test_lightos.py0000664000175000017500000002740200000000000025620 0ustar00zuulzuul00000000000000# Copyright (C) 2016-2020 Lightbits Labs Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glob import http.client import queue from unittest import mock from unittest.mock import mock_open import psutil from os_brick import exception from os_brick.initiator.connectors import lightos from os_brick.initiator import linuxscsi from os_brick.privileged import lightos as priv_lightos from os_brick.tests.initiator import test_connector from os_brick import utils FAKE_NQN = "nqn.fake.qnq" FAKE_HOST_IPS = [ "1234:5678:9abc:def0:1234:5678:9abc:def0", "1234:5678:0:42::8a2e:370:7334", '172.17.0.1'] FAKE_LIGHTOS_CLUSTER_NODES = { "nodes": [ {"UUID": "926e6df8-73e1-11ec-a624-000000000001", "nvmeEndpoint": "192.168.75.10:4420"}, {"UUID": "926e6df8-73e1-11ec-a624-000000000002", "nvmeEndpoint": "192.168.75.11:4420"}, {"UUID": "926e6df8-73e1-11ec-a624-000000000003", "nvmeEndpoint": "192.168.75.12:4420"} ] } FAKE_SUBSYSNQN = "nqn.2014-08.org.nvmexpress:NVMf:uuid:" FAKE_LIGHTOS_CLUSTER_INFO = { 'UUID': "926e6df8-73e1-11ec-a624-07ba3880f6cc", 'subsystemNQN': "nqn.2014-08.org.nvmexpress:NVMf:uuid:" "f4a89ce0-9fc2-4900-bfa3-00ad27995e7b", 'nodes_ips': ["10.17.167.4", "10.17.167.5", "10.17.167.6"] } FAKE_VOLUME_UUID = "926e6df8-73e1-11ec-a624-07ba3880f6cd" NUM_BLOCKS_IN_GIB = 2097152 BLOCK_SIZE = 512 def get_http_response_mock(status): resp = mock.Mock() resp.status = status return resp class LightosConnectorTestCase(test_connector.ConnectorTestCase): """Test cases for NVMe initiator class.""" def setUp(self): super(LightosConnectorTestCase, self).setUp() self.connector = lightos.LightOSConnector(None, execute=self.fake_execute) @staticmethod def _get_connection_info(): lightos_nodes = {} for ip in FAKE_LIGHTOS_CLUSTER_INFO['nodes_ips']: lightos_nodes[ip] = dict( transport_type='tcp', target_portal=ip, target_port=8009 ) return dict( subsysnqn=FAKE_LIGHTOS_CLUSTER_INFO['subsystemNQN'], uuid=FAKE_LIGHTOS_CLUSTER_INFO['UUID'], lightos_nodes=lightos_nodes ) @mock.patch.object(lightos.LightOSConnector, 'get_ip_addresses', return_value=FAKE_HOST_IPS) @mock.patch.object(utils, 'get_host_nqn', return_value=FAKE_NQN) @mock.patch.object(lightos.LightOSConnector, 'find_dsc', return_value=True) def test_get_connector_properties(self, mock_nqn, mock_dsc, mock_host_ips): props = self.connector.get_connector_properties(None) expected_props = {"nqn": FAKE_NQN, "found_dsc": True, "host_ips": FAKE_HOST_IPS} self.assertEqual(expected_props, props) 
    @mock.patch.object(lightos.http.client.HTTPConnection, "request",
                       return_value=None)
    @mock.patch.object(lightos.http.client.HTTPConnection, "getresponse",
                       return_value=get_http_response_mock(http.client.OK))
    def test_find_dsc_success(self, mocked_response, mocked_request):
        self.assertEqual(self.connector.find_dsc(), 'found')

    @mock.patch.object(lightos.http.client.HTTPConnection, "request",
                       return_value=None)
    @mock.patch.object(lightos.http.client.HTTPConnection, "getresponse",
                       return_value=get_http_response_mock(
                           http.client.NOT_FOUND))
    def test_find_dsc_failure(self, mocked_response, mocked_request):
        self.assertEqual(self.connector.find_dsc(), '')

    @mock.patch.object(utils, 'get_host_nqn', return_value=FAKE_NQN)
    @mock.patch.object(lightos.priv_lightos, 'move_dsc_file',
                       return_value="/etc/discovery_client/discovery.d/v0")
    @mock.patch.object(lightos.LightOSConnector,
                       '_check_device_exists_using_dev_lnk',
                       return_value="/dev/nvme0n1")
    def test_connect_volume_succeed(self, mock_check_device, mock_move_file,
                                    mock_nqn):
        self.connector.connect_volume(self._get_connection_info())

    @mock.patch.object(utils, 'get_host_nqn', return_value=FAKE_NQN)
    @mock.patch.object(lightos.priv_lightos, 'move_dsc_file',
                       return_value="/etc/discovery_client/discovery.d/v0")
    @mock.patch.object(lightos.priv_lightos, 'delete_dsc_file',
                       return_value=None)
    @mock.patch.object(lightos.LightOSConnector, '_get_device_by_uuid',
                       return_value=None)
    def test_connect_volume_failure(self, mock_get_device, mock_delete_file,
                                    mock_move_file, mock_nqn):
        self.assertRaises(exception.BrickException,
                          self.connector.connect_volume,
                          self._get_connection_info())

    @mock.patch.object(priv_lightos, 'delete_dsc_file', return_value=True)
    def test_dsc_disconnect_volume_succeed(self, mock_priv_lightos):
        self.connector.dsc_disconnect_volume(self._get_connection_info())

    @mock.patch.object(priv_lightos, 'delete_dsc_file',
                       side_effect=OSError("failed to delete file"))
    def test_dsc_disconnect_volume_failure(self, execute_mock):
        self.assertRaises(OSError,
                          self.connector.dsc_disconnect_volume,
                          self._get_connection_info())

    @mock.patch.object(lightos.LightOSConnector,
                       '_check_device_exists_using_dev_lnk',
                       return_value="/dev/nvme0n1")
    def test_get_device_by_uuid_succeed_with_link(self, execute_mock):
        self.assertEqual(self.connector._get_device_by_uuid(FAKE_VOLUME_UUID),
                         "/dev/nvme0n1")

    @mock.patch.object(lightos.LightOSConnector,
                       '_check_device_exists_reading_block_class',
                       return_value="/dev/nvme0n1")
    def test_get_device_by_uuid_succeed_with_block_class(self, execute_mock):
        self.assertEqual(self.connector._get_device_by_uuid(FAKE_VOLUME_UUID),
                         "/dev/nvme0n1")

    @mock.patch.object(lightos.LightOSConnector,
                       '_check_device_exists_using_dev_lnk',
                       side_effect=[None, False, "/dev/nvme0n1"])
    @mock.patch.object(lightos.LightOSConnector,
                       '_check_device_exists_reading_block_class',
                       side_effect=[None, False, "/dev/nvme0n1"])
    def test_get_device_by_uuid_many_attempts(self, mock_block_class,
                                              mock_dev_lnk):
        self.assertEqual(self.connector._get_device_by_uuid(FAKE_VOLUME_UUID),
                         '/dev/nvme0n1')

    @mock.patch.object(lightos.LightOSConnector, 'dsc_connect_volume',
                       return_value=None)
    @mock.patch.object(lightos.LightOSConnector, '_get_device_by_uuid',
                       return_value="/dev/nvme0n1")
    def test_connect_volume(self, mock_get_device, mock_dsc_connect):
connection_properties = {"nqn": FAKE_NQN, "found_dsc": True, "uuid": "123"} expected_device_info = {'type': 'block', "path": "/dev/nvme0n1"} device_info = self.connector.connect_volume(connection_properties) self.assertEqual(expected_device_info, device_info) @mock.patch.object(linuxscsi.LinuxSCSI, 'flush_device_io', autospec=True) @mock.patch.object(lightos.LightOSConnector, '_get_device_by_uuid', return_value="/dev/nvme0n1") @mock.patch.object(lightos.LightOSConnector, 'dsc_disconnect_volume') def test_disconnect_volume(self, mock_disconnect, mock_uuid, mock_flush): connection_properties = {"nqn": FAKE_NQN, "found_dsc": True, "uuid": "123"} self.connector.disconnect_volume(connection_properties, None) mock_disconnect.assert_called_once_with(connection_properties) mock_flush.assert_called_once_with(mock.ANY, "/dev/nvme0n1") @mock.patch.object(lightos.LightOSConnector, '_get_device_by_uuid', return_value="/dev/nvme0n1") @mock.patch("builtins.open", new_callable=mock_open, read_data=f"{str(NUM_BLOCKS_IN_GIB)}\n") def test_extend_volume(self, mock_execute, m_open): connection_properties = {'uuid': FAKE_VOLUME_UUID} self.assertEqual(self.connector.extend_volume(connection_properties), NUM_BLOCKS_IN_GIB * BLOCK_SIZE) def mock_net_if_addr(): class MockSnicAdd: def __init__(self, address): self.address = address return { 'lo': [ MockSnicAdd(address='127.0.0.1'), MockSnicAdd(address='::1') ], 'wlp0s20f3': [ MockSnicAdd(address=FAKE_HOST_IPS[2]), MockSnicAdd(address=FAKE_HOST_IPS[1]), MockSnicAdd(address=f'{FAKE_HOST_IPS[0]}%wlp0s20f3') ] } @mock.patch.object(psutil, 'net_if_addrs', new=mock_net_if_addr) def test_get_ips(self): self.assertEqual(sorted(self.connector.get_ip_addresses()), sorted(FAKE_HOST_IPS)) def test_monitor_message_queue_delete(self): message_queue = queue.Queue() connection = {"uuid": "123"} message_queue.put(("delete", connection)) lightos_db = {"123": "fake_connection"} self.connector.monitor_message_queue(message_queue, lightos_db) self.assertEqual(len(lightos_db), 0) def test_monitor_message_queue_add(self): message_queue = queue.Queue() connection = {"uuid": "123"} lightos_db = {} message_queue.put(("add", connection)) self.connector.monitor_message_queue(message_queue, lightos_db) self.assertEqual(len(lightos_db), 1) @mock.patch.object(lightos.os.path, 'exists', return_value=True) @mock.patch.object(lightos.os.path, 'realpath', return_value="/dev/nvme0n1") def test_check_device_exists_using_dev_lnk_succeed(self, mock_path_exists, mock_realpath): found_dev = self.connector._check_device_exists_using_dev_lnk( FAKE_VOLUME_UUID) self.assertEqual("/dev/nvme0n1", found_dev) def test_check_device_exists_using_dev_lnk_false(self): self.assertIsNone(self.connector._check_device_exists_using_dev_lnk( FAKE_VOLUME_UUID)) @mock.patch.object(glob, "glob", return_value=['/path/nvme0n1/wwid']) @mock.patch("builtins.open", new_callable=mock_open, read_data=f"uuid.{FAKE_VOLUME_UUID}\n") def test_check_device_exists_reading_block_class(self, mock_glob, m_open): found_dev = self.connector._check_device_exists_reading_block_class( FAKE_VOLUME_UUID) self.assertEqual("/dev/nvme0n1", found_dev) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/initiator/connectors/test_local.py0000664000175000017500000000435400000000000025242 0ustar00zuulzuul00000000000000# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from os_brick.initiator.connectors import local
from os_brick.tests.initiator import test_connector


class LocalConnectorTestCase(test_connector.ConnectorTestCase):

    def setUp(self):
        super(LocalConnectorTestCase, self).setUp()
        self.connection_properties = {'name': 'foo',
                                      'device_path': '/tmp/bar'}
        self.connector = local.LocalConnector(None)

    def test_get_connector_properties(self):
        props = local.LocalConnector.get_connector_properties(
            'sudo', multipath=True, enforce_multipath=True)
        expected_props = {}
        self.assertEqual(expected_props, props)

    def test_get_search_path(self):
        actual = self.connector.get_search_path()
        self.assertIsNone(actual)

    def test_get_volume_paths(self):
        expected = [self.connection_properties['device_path']]
        actual = self.connector.get_volume_paths(
            self.connection_properties)
        self.assertEqual(expected, actual)

    def test_connect_volume(self):
        cprops = self.connection_properties
        dev_info = self.connector.connect_volume(cprops)
        self.assertEqual(dev_info['type'], 'local')
        self.assertEqual(dev_info['path'], cprops['device_path'])

    def test_connect_volume_with_invalid_connection_data(self):
        cprops = {}
        self.assertRaises(ValueError,
                          self.connector.connect_volume, cprops)

    def test_extend_volume(self):
        self.assertRaises(NotImplementedError,
                          self.connector.extend_volume,
                          self.connection_properties)


# ===== os_brick-6.11.0/os_brick/tests/initiator/connectors/test_nvmeof.py =====

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import builtins import errno import os.path from unittest import mock import ddt from oslo_concurrency import processutils as putils from os_brick import exception from os_brick import executor from os_brick.initiator.connectors import nvmeof from os_brick.privileged import nvmeof as priv_nvmeof from os_brick.privileged import rootwrap as priv_rootwrap from os_brick.tests import base as test_base from os_brick.tests.initiator import test_connector from os_brick import utils TARGET_NQN = 'target.nqn' VOL_UUID = 'c20aba21-6ef6-446b-b374-45733b4883ba' VOL_UUID_NO_HYPHENS = 'c20aba216ef6446bb37445733b4883ba' NVME_DEVICE_PATH = '/dev/nvme1' NVME_NS_PATH = '/dev/nvme1n1' NGUID = '4941ef75-95b8-ee97-8ccf-096800f205c6' NGUID_NO_HYPHENS = '4941ef7595b8ee978ccf096800f205c6' SYS_UUID = '9126E942-396D-11E7-B0B7-A81E84C186D1' HOST_UUID = 'c20aba21-6ef6-446b-b374-45733b4883ba' HOST_NQN = 'nqn.2014-08.org.nvmexpress:uuid:' \ 'beaae2de-3a97-4be1-a739-6ac4bc5bf138' VOL_UUID1 = '9b30ec12-75b9-4a53-be32-111111111111' VOL_UUID2 = '9b30ec12-75b9-4a53-be32-222222222222' VOL_UUID3 = '9b30ec12-75b9-4a53-be32-333333333333' volume_replicas = [{'target_nqn': 'fakenqn1', 'vol_uuid': VOL_UUID1, 'portals': [('10.0.0.1', 4420, 'tcp')]}, {'target_nqn': 'fakenqn2', 'vol_uuid': VOL_UUID2, 'portals': [('10.0.0.2', 4420, 'tcp')]}, {'target_nqn': 'fakenqn3', 'vol_uuid': VOL_UUID3, 'portals': [('10.0.0.3', 4420, 'tcp')]}] connection_properties = { 'alias': 'fakealias', 'vol_uuid': VOL_UUID, 'volume_replicas': volume_replicas, 'replica_count': 3 } CONN_PROPS = nvmeof.NVMeOFConnProps(connection_properties) fake_portal = ('fake', 'portal', 'tcp') nvme_list_stdout = """ Node SN Model Namespace Usage Format FW Rev ------------- ------- ----- --------- ---------------- ----------- ------- /dev/nvme0n1 AB12345 s123 12682 0.00 B / 2.15 GB 512 B + 0 B 2.1.0.0 /dev/nvme0n2 AB12345 s123 12683 0.00 B / 1.07 GB 512 B + 0 B 2.1.0.0 """ md_stat_contents = """ Personalities : [raid0] md0 : active raid0 nvme0n1[4] nvme1n1[3] nvme2n1[2] nvme3n1[1] 20508171264 blocks super 1.2 level 5, 512k chunk, algorithm 2 [4/4] [UUUU] unused devices: """ # noqa @ddt.ddt class UtilityMethodsTestCase(test_base.TestCase): @mock.patch.object(nvmeof, 'sysfs_property', return_value='live') def test_ctrl_property(self, mock_sysfs): """Controller properties just read from nvme fabrics in sysfs.""" res = nvmeof.ctrl_property('state', 'nvme0') self.assertEqual('live', res) mock_sysfs.assert_called_once_with('state', '/sys/class/nvme-fabrics/ctl/nvme0') @mock.patch.object(nvmeof, 'sysfs_property', return_value='uuid_value') def test_blk_property(self, mock_sysfs): """Block properties just read from block devices in sysfs.""" res = nvmeof.blk_property('uuid', 'nvme0n1') self.assertEqual('uuid_value', res) mock_sysfs.assert_called_once_with('uuid', '/sys/class/block/nvme0n1') @mock.patch.object(builtins, 'open') def test_sysfs_property(self, mock_open): """Method is basically an open an read method.""" mock_read = mock_open.return_value.__enter__.return_value.read mock_read.return_value = ' uuid ' res = nvmeof.sysfs_property('uuid', '/sys/class/block/nvme0n1') self.assertEqual('uuid', res) mock_open.assert_called_once_with('/sys/class/block/nvme0n1/uuid', 'r') mock_read.assert_called_once_with() @mock.patch.object(builtins, 'open', side_effect=FileNotFoundError) def test_sysfs_property_not_found(self, mock_open): """Failure to open file returns None.""" mock_read = mock_open.return_value.__enter__.return_value.read res = nvmeof.sysfs_property('uuid', 
'/sys/class/block/nvme0n1') self.assertIsNone(res) mock_open.assert_called_once_with('/sys/class/block/nvme0n1/uuid', 'r') mock_read.assert_not_called() @mock.patch.object(builtins, 'open') def test_sysfs_property_ioerror(self, mock_open): """Failure to read file returns None.""" mock_read = mock_open.return_value.__enter__.return_value.read mock_read.side_effect = IOError res = nvmeof.sysfs_property('uuid', '/sys/class/block/nvme0n1') self.assertIsNone(res) mock_open.assert_called_once_with('/sys/class/block/nvme0n1/uuid', 'r') mock_read.assert_called_once_with() @ddt.data('/dev/nvme0n10', '/sys/class/block/nvme0c1n10', '/sys/class/nvme-fabrics/ctl/nvme1/nvme0c1n10') def test_nvme_basename(self, name): """ANA devices are transformed to the right name.""" res = nvmeof.nvme_basename(name) self.assertEqual('nvme0n10', res) @ddt.ddt class PortalTestCase(test_base.TestCase): def setUp(self): self.conn_props_dict = {'target_nqn': 'nqn_value', 'vol_uuid': VOL_UUID, 'portals': [('portal1', 'port1', 'RoCEv2')]} self.conn_props = nvmeof.NVMeOFConnProps(self.conn_props_dict) self.target = self.conn_props.targets[0] self.portal = self.target.portals[0] super().setUp() @ddt.data(('RoCEv2', 'rdma'), ('rdma', 'rdma'), ('tcp', 'tcp'), ('TCP', 'tcp'), ('other', 'tcp')) @ddt.unpack def test_init(self, transport, expected_transport): """Init changes conn props transport into rdma or tcp.""" portal = nvmeof.Portal(self.target, 'address', 'port', transport) self.assertEqual(self.target, portal.parent_target) self.assertEqual('address', portal.address) self.assertEqual('port', portal.port) self.assertIsNone(portal.controller) self.assertEqual(expected_transport, portal.transport) @ddt.data(('live', True), ('connecting', False), (None, False)) @ddt.unpack @mock.patch.object(nvmeof.Portal, 'state', new_callable=mock.PropertyMock) def test_is_live(self, state, expected, mock_state): """Is live only returns True if the state is 'live'.""" mock_state.return_value = state self.assertIs(expected, self.portal.is_live) mock_state.assert_called_once_with() @mock.patch.object(nvmeof, 'ctrl_property', return_value='10') def test_reconnect_delay(self, mock_property): """Reconnect delay returns an int.""" self.portal.controller = 'nvme0' self.assertIs(10, self.portal.reconnect_delay) mock_property.assert_called_once_with('reconnect_delay', 'nvme0') @mock.patch.object(nvmeof, 'ctrl_property') def test_state(self, mock_property): """State uses sysfs to check the value.""" self.portal.controller = 'nvme0' self.assertEqual(mock_property.return_value, self.portal.state) mock_property.assert_called_once_with('state', 'nvme0') @mock.patch.object(nvmeof, 'ctrl_property') def test_state_no_controller(self, mock_property): """Cannot read the state if the controller name has not been found.""" self.portal.controller = None self.assertIsNone(self.portal.state) mock_property.assert_not_called() @mock.patch.object(nvmeof.Portal, 'get_device_by_property') def test_get_device(self, mock_property): """UUID has priority over everything else.""" mock_property.return_value = 'result' self.target.nguid = 'nguid' # will be ignored res = self.portal.get_device() self.assertEqual('result', res) mock_property.assert_called_once_with('uuid', self.target.uuid) @mock.patch.object(nvmeof.Portal, 'get_device_by_property') def test_get_device_by_nguid(self, mock_property): """nguid takes priority over ns_id if no UUID.""" mock_property.return_value = 'result' self.target.uuid = None self.target.nguid = 'nguid_value' self.target.ns_id = 'ns_id_value' # 
will be ignored res = self.portal.get_device() self.assertEqual('result', res) mock_property.assert_called_once_with('nguid', 'nguid_value') @mock.patch.object(nvmeof.Portal, 'get_device_by_property') def test_get_device_by_ns_id(self, mock_property): """ns_id takes priority if no UUID and nguid are present.""" mock_property.return_value = 'result' self.target.uuid = None self.target.nguid = None self.target.ns_id = 'ns_id_value' res = self.portal.get_device() self.assertEqual('result', res) mock_property.assert_called_once_with('nsid', 'ns_id_value') @mock.patch.object(nvmeof.Target, 'get_device_path_by_initial_devices') @mock.patch.object(nvmeof.Portal, 'get_device_by_property') def test_get_device_by_initial_devices(self, mock_property, mock_get_dev): """With no id, calls target to get device from initial devices.""" mock_get_dev.return_value = 'result' self.target.uuid = None self.target.nguid = None self.target.ns_id = None res = self.portal.get_device() self.assertEqual('result', res) mock_get_dev.assert_called_once_with() @mock.patch('glob.glob') def test_get_all_namespaces_ctrl_paths(self, mock_glob): expected = ['/sys/class/nvme-fabrics/ctl/nvme0/nvme0n1', '/sys/class/nvme-fabrics/ctl/nvme0/nvme1c1n2'] mock_glob.return_value = expected[:] self.portal.controller = 'nvme0' res = self.portal.get_all_namespaces_ctrl_paths() self.assertEqual(expected, res) mock_glob.assert_called_once_with( '/sys/class/nvme-fabrics/ctl/nvme0/nvme*') @mock.patch('glob.glob') def test_get_all_namespaces_ctrl_paths_no_controller(self, mock_glob): res = self.portal.get_all_namespaces_ctrl_paths() self.assertEqual([], res) mock_glob.assert_not_called() @mock.patch.object(nvmeof, 'nvme_basename', return_value='nvme1n2') @mock.patch.object(nvmeof, 'sysfs_property') @mock.patch.object(nvmeof.Portal, 'get_all_namespaces_ctrl_paths') def test_get_device_by_property(self, mock_paths, mock_property, mock_name): """Searches all devices for the right one and breaks when found.""" mock_paths.return_value = [ '/sys/class/nvme-fabrics/ctl/nvme0/nvme0n1', '/sys/class/nvme-fabrics/ctl/nvme0/nvme1c1n2', '/sys/class/nvme-fabrics/ctl/nvme0/nvme0n3' ] mock_property.side_effect = ['uuid1', 'uuid2'] self.portal.controller = 'nvme0' res = self.portal.get_device_by_property('uuid', 'uuid2') self.assertEqual('/dev/nvme1n2', res) mock_paths.assert_called_once_with() self.assertEqual(2, mock_property.call_count) mock_property.assert_has_calls( [mock.call('uuid', '/sys/class/nvme-fabrics/ctl/nvme0/nvme0n1'), mock.call('uuid', '/sys/class/nvme-fabrics/ctl/nvme0/nvme1c1n2')] ) mock_name.assert_called_once_with( '/sys/class/nvme-fabrics/ctl/nvme0/nvme1c1n2') @mock.patch.object(nvmeof, 'nvme_basename', return_value='nvme1n2') @mock.patch.object(nvmeof, 'sysfs_property') @mock.patch.object(nvmeof.Portal, 'get_all_namespaces_ctrl_paths') def test_get_device_by_property_not_found( self, mock_paths, mock_property, mock_name): """Exhausts devices searching before returning None.""" mock_paths.return_value = ['/sys/class/nvme-fabrics/ctl/nvme0/nvme0n1', '/sys/class/nvme-fabrics/ctl/nvme0/nvme0n2'] mock_property.side_effect = ['uuid1', 'uuid2'] self.portal.controller = 'nvme0' res = self.portal.get_device_by_property('uuid', 'uuid3') self.assertIsNone(res) mock_paths.assert_called_once_with() self.assertEqual(2, mock_property.call_count) mock_property.assert_has_calls( [mock.call('uuid', '/sys/class/nvme-fabrics/ctl/nvme0/nvme0n1'), mock.call('uuid', '/sys/class/nvme-fabrics/ctl/nvme0/nvme0n2')] ) mock_name.assert_not_called() 
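    # NOTE: a minimal illustrative sketch, not an original helper. The
    # sysfs_property tests in UtilityMethodsTestCase above exercise behaviour
    # that boils down to reading and stripping a single sysfs attribute file
    # and returning None when the file is missing or unreadable. The name
    # below is hypothetical.
    @staticmethod
    def _demo_sysfs_property(prop, path):
        try:
            with open('%s/%s' % (path, prop), 'r') as f:
                # Values like ' uuid ' are stripped to 'uuid'
                return f.read().strip()
        except OSError:  # covers FileNotFoundError and IOError
            return None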
@mock.patch.object(nvmeof.Portal, 'get_all_namespaces_ctrl_paths') def test__can_disconnect_no_controller_name(self, mock_paths): """Cannot disconnect when portal doesn't have a controller.""" res = self.portal.can_disconnect() self.assertFalse(res) mock_paths.assert_not_called() @ddt.data(([], True), (['/sys/class/nvme-fabrics/ctl/nvme0/nvme0n1', '/sys/class/nvme-fabrics/ctl/nvme0/nvme0n2'], False)) @ddt.unpack @mock.patch.object(nvmeof.Portal, 'get_all_namespaces_ctrl_paths') def test__can_disconnect_not_1_namespace( self, ctrl_paths, expected, mock_paths): """Check if can disconnect when we don't have 1 namespace in subsys.""" self.portal.controller = 'nvme0' mock_paths.return_value = ctrl_paths res = self.portal.can_disconnect() self.assertIs(expected, res) mock_paths.assert_called_once_with() @mock.patch.object(nvmeof.Portal, 'get_device') @mock.patch.object(nvmeof.Portal, 'get_all_namespaces_ctrl_paths') def test__can_disconnect(self, mock_paths, mock_device): """Can disconnect if the namespace is the one from this target. This tests that even when ANA is enabled it can identify the control path as belonging to the used device path. """ self.portal.controller = 'nvme0' mock_device.return_value = '/dev/nvme1n2' mock_paths.return_value = [ '/sys/class/nvme-fabrics/ctl/nvme0/nvme1c1n2'] self.assertTrue(self.portal.can_disconnect()) @mock.patch.object(nvmeof.Portal, 'get_device') @mock.patch.object(nvmeof.Portal, 'get_all_namespaces_ctrl_paths') def test__can_disconnect_different_target(self, mock_paths, mock_device): """Cannot disconnect if the namespace is from a different target.""" self.portal.controller = 'nvme0' mock_device.return_value = None mock_paths.return_value = [ '/sys/class/nvme-fabrics/ctl/nvme0/nvme1c1n2'] self.assertFalse(self.portal.can_disconnect()) @ddt.ddt class TargetTestCase(test_base.TestCase): def setUp(self): self.conn_props_dict = { 'target_nqn': 'nqn_value', 'vol_uuid': VOL_UUID, 'portals': [('portal1', 'port1', 'RoCEv2'), ('portal2', 'port2', 'anything')], } self.conn_props = nvmeof.NVMeOFConnProps(self.conn_props_dict) self.target = self.conn_props.targets[0] super().setUp() @mock.patch.object(nvmeof.Target, '__init__', return_value=None) def test_factory(self, mock_init): """Test Target factory The factory's parameter names take after the keys in the connection properties, and then calls the class init method that uses different names. 
""" res = nvmeof.Target.factory(self.conn_props, **self.conn_props_dict) mock_init.assert_called_once_with( self.conn_props, self.conn_props_dict['target_nqn'], self.conn_props_dict['portals'], self.conn_props_dict['vol_uuid'], None, # nguid None, # ns_id None, # host_nqn False) # find_controllers self.assertIsInstance(res, nvmeof.Target) @ddt.data(True, False) @mock.patch.object(nvmeof.Target, 'set_portals_controllers') @mock.patch.object(nvmeof.Portal, '__init__', return_value=None) def test_init(self, find_controllers, mock_init, mock_set_ctrls): """Init instantiates portals and may call set_portals_controllers.""" target = nvmeof.Target(self.conn_props, 'nqn', self.conn_props_dict['portals'], # Confirm they get converted to hyphenated VOL_UUID_NO_HYPHENS, NGUID_NO_HYPHENS, 'ns_id', 'host_nqn', find_controllers) self.assertEqual(self.conn_props, target.source_conn_props) self.assertEqual('nqn', target.nqn) self.assertEqual(VOL_UUID, target.uuid) self.assertEqual(NGUID, target.nguid) self.assertEqual('ns_id', target.ns_id) self.assertEqual('host_nqn', target.host_nqn) self.assertIsInstance(target.portals[0], nvmeof.Portal) self.assertIsInstance(target.portals[1], nvmeof.Portal) if find_controllers: mock_set_ctrls.assert_called_once_with() else: mock_set_ctrls.assert_not_called() self.assertEqual(2, mock_init.call_count) mock_init.assert_has_calls( [mock.call(target, 'portal1', 'port1', 'RoCEv2'), mock.call(target, 'portal2', 'port2', 'anything')] ) @mock.patch.object(nvmeof.Target, '_get_nvme_devices') @mock.patch.object(nvmeof.Target, 'set_portals_controllers') @mock.patch.object(nvmeof.Portal, '__init__', return_value=None) def test_init_no_id(self, mock_init, mock_set_ctrls, mock_get_devs): """With no ID parameters query existing nvme devices.""" target = nvmeof.Target(self.conn_props, 'nqn', self.conn_props_dict['portals']) self.assertEqual(self.conn_props, target.source_conn_props) self.assertEqual('nqn', target.nqn) for name in ('uuid', 'nguid', 'ns_id'): self.assertIsNone(getattr(target, name)) self.assertIsInstance(target.portals[0], nvmeof.Portal) self.assertIsInstance(target.portals[1], nvmeof.Portal) mock_set_ctrls.assert_not_called() mock_get_devs.assert_called_once_with() self.assertEqual(2, mock_init.call_count) mock_init.assert_has_calls( [mock.call(target, 'portal1', 'port1', 'RoCEv2'), mock.call(target, 'portal2', 'port2', 'anything')] ) @mock.patch('glob.glob', return_value=['/dev/nvme0n1', '/dev/nvme1n1']) def test__get_nvme_devices(self, mock_glob): """Test getting all nvme devices present in system.""" res = self.target._get_nvme_devices() self.assertEqual(mock_glob.return_value, res) mock_glob.assert_called_once_with('/dev/nvme*n*') @mock.patch.object(nvmeof.Portal, 'is_live', new_callable=mock.PropertyMock) def test_live_portals(self, mock_is_live): """List with only live portals should be returned.""" mock_is_live.side_effect = (True, False) res = self.target.live_portals self.assertListEqual([self.target.portals[0]], res) @mock.patch.object(nvmeof.Portal, 'state', new_callable=mock.PropertyMock) def test_present_portals(self, mock_state): """List with only live portals should be returned.""" # Duplicate number of portals self.target.portals.extend(self.target.portals) mock_state.side_effect = (None, 'live', 'connecting', 'live') res = self.target.present_portals self.assertListEqual(self.target.portals[1:], res) @mock.patch('glob.glob') def test_set_portals_controllers_do_nothing(self, mock_glob): """Do nothing if all protals already have the controller name.""" 
self.target.portals[0].controller = 'nvme0' self.target.portals[1].controller = 'nvme1' self.target.set_portals_controllers() mock_glob.assert_not_called() @ddt.data('traddr=portal2,trsvcid=port2', 'traddr=portal2,trsvcid=port2,src_addr=myip') @mock.patch.object(nvmeof, 'sysfs_property') @mock.patch('glob.glob') def test_set_portals_controllers(self, addr, mock_glob, mock_sysfs): """Look in sysfs for the device paths.""" portal = nvmeof.Portal(self.target, 'portal4', 'port4', 'tcp') portal.controller = 'nvme0' self.target.portals.insert(0, portal) self.target.portals.append(nvmeof.Portal(self.target, 'portal5', 'port5', 'tcp')) self.target.host_nqn = 'nqn' mock_glob.return_value = ['/sys/class/nvme-fabrics/ctl/nvme0', '/sys/class/nvme-fabrics/ctl/nvme1', '/sys/class/nvme-fabrics/ctl/nvme2', '/sys/class/nvme-fabrics/ctl/nvme3', '/sys/class/nvme-fabrics/ctl/nvme4', '/sys/class/nvme-fabrics/ctl/nvme5'] mock_sysfs.side_effect = [ # nvme0 is skipped because it already belongs to the first portal # nvme1 nqn doesn't match 'wrong-nqn', # nvme2 matches nqn but not the address self.target.nqn, 'rdma', 'traddr=portal5,trsvcid=port5', 'nqn', # nvme3 matches first portal but not the host_nqn self.target.nqn, 'rdma', 'traddr=portal2,trsvcid=port2', 'badnqn', # nvme4 matches first portal self.target.nqn, 'tcp', addr, 'nqn', # nvme5 simulates OS doesn't have the hostnqn attribute self.target.nqn, 'tcp', 'traddr=portal5,trsvcid=port5', None, ] self.target.set_portals_controllers() mock_glob.assert_called_once_with('/sys/class/nvme-fabrics/ctl/nvme*') expected_calls = [ mock.call('subsysnqn', '/sys/class/nvme-fabrics/ctl/nvme1'), mock.call('subsysnqn', '/sys/class/nvme-fabrics/ctl/nvme2'), mock.call('transport', '/sys/class/nvme-fabrics/ctl/nvme2'), mock.call('address', '/sys/class/nvme-fabrics/ctl/nvme2'), mock.call('hostnqn', '/sys/class/nvme-fabrics/ctl/nvme2'), mock.call('subsysnqn', '/sys/class/nvme-fabrics/ctl/nvme3'), mock.call('transport', '/sys/class/nvme-fabrics/ctl/nvme3'), mock.call('address', '/sys/class/nvme-fabrics/ctl/nvme3'), mock.call('hostnqn', '/sys/class/nvme-fabrics/ctl/nvme3'), mock.call('subsysnqn', '/sys/class/nvme-fabrics/ctl/nvme4'), mock.call('transport', '/sys/class/nvme-fabrics/ctl/nvme4'), mock.call('address', '/sys/class/nvme-fabrics/ctl/nvme4'), mock.call('hostnqn', '/sys/class/nvme-fabrics/ctl/nvme4'), mock.call('subsysnqn', '/sys/class/nvme-fabrics/ctl/nvme5'), mock.call('transport', '/sys/class/nvme-fabrics/ctl/nvme5'), mock.call('address', '/sys/class/nvme-fabrics/ctl/nvme5'), mock.call('hostnqn', '/sys/class/nvme-fabrics/ctl/nvme5'), ] self.assertEqual(len(expected_calls), mock_sysfs.call_count) mock_sysfs.assert_has_calls(expected_calls) # Confirm we didn't touch the first two portals self.assertEqual('nvme0', self.target.portals[0].controller) self.assertIsNone(self.target.portals[1].controller) self.assertEqual('nvme4', self.target.portals[2].controller) self.assertEqual('nvme5', self.target.portals[3].controller) @mock.patch('os_brick.utils.get_host_nqn', mock.Mock(return_value='nqn')) @mock.patch.object(nvmeof, 'sysfs_property') @mock.patch('glob.glob') def test_set_portals_controllers_short_circuit( self, mock_glob, mock_sysfs): """Stops looking once we have found names for all portals.""" self.target.portals[0].controller = 'nvme0' mock_glob.return_value = ['/sys/class/nvme-fabrics/ctl/nvme0', '/sys/class/nvme-fabrics/ctl/nvme1', '/sys/class/nvme-fabrics/ctl/nvme2', '/sys/class/nvme-fabrics/ctl/nvme3'] mock_sysfs.side_effect = [ self.target.nqn, 'tcp', 
            'traddr=portal2,trsvcid=port2', 'nqn',
        ]

        self.target.set_portals_controllers()

        mock_glob.assert_called_once_with('/sys/class/nvme-fabrics/ctl/nvme*')
        expected_calls = [
            mock.call('subsysnqn', '/sys/class/nvme-fabrics/ctl/nvme1'),
            mock.call('transport', '/sys/class/nvme-fabrics/ctl/nvme1'),
            mock.call('address', '/sys/class/nvme-fabrics/ctl/nvme1'),
            mock.call('hostnqn', '/sys/class/nvme-fabrics/ctl/nvme1'),
        ]
        self.assertEqual(len(expected_calls), mock_sysfs.call_count)
        mock_sysfs.assert_has_calls(expected_calls)

        # Confirm we didn't touch the first portal, which already had a name
        self.assertEqual('nvme0', self.target.portals[0].controller)
        # The second portal was set with the newly found controller name
        self.assertEqual('nvme1', self.target.portals[1].controller)

    @mock.patch.object(nvmeof.Target, 'present_portals',
                       new_callable=mock.PropertyMock)
    @mock.patch.object(nvmeof.Target, 'live_portals',
                       new_callable=mock.PropertyMock)
    def test_get_devices_first_live(self, mock_live, mock_present):
        """Return on first live portal with a device."""
        portal1 = mock.Mock(**{'get_device.return_value': None})
        portal2 = mock.Mock(**{'get_device.return_value': '/dev/nvme0n1'})
        portal3 = mock.Mock(**{'get_device.return_value': None})
        mock_live.return_value = [portal1, portal2]

        res = self.target.get_devices(only_live=True, get_one=True)

        self.assertListEqual(['/dev/nvme0n1'], res)
        mock_live.assert_called_once_with()
        mock_present.assert_not_called()
        portal1.get_device.assert_called_once_with()
        portal2.get_device.assert_called_once_with()
        portal3.get_device.assert_not_called()

    @mock.patch.object(nvmeof.Target, 'present_portals',
                       new_callable=mock.PropertyMock)
    @mock.patch.object(nvmeof.Target, 'live_portals',
                       new_callable=mock.PropertyMock)
    def test_get_devices_get_present(self, mock_live, mock_present):
        """Return all devices that are found."""
        portal1 = mock.Mock(**{'get_device.return_value': '/dev/nvme0n1'})
        portal2 = mock.Mock(**{'get_device.return_value': None})
        portal3 = mock.Mock(**{'get_device.return_value': '/dev/nvme1n1'})
        mock_present.return_value = [portal1, portal2, portal3]

        res = self.target.get_devices(only_live=False)

        self.assertIsInstance(res, list)
        self.assertEqual({'/dev/nvme0n1', '/dev/nvme1n1'}, set(res))
        mock_present.assert_called_once_with()
        mock_live.assert_not_called()
        portal1.get_device.assert_called_once_with()
        portal2.get_device.assert_called_once_with()
        portal3.get_device.assert_called_once_with()

    @mock.patch.object(nvmeof.Target, 'get_devices')
    def test_find_device_not_found(self, mock_get_devs):
        """Finding a device tries up to 5 times before giving up."""
        mock_get_devs.return_value = []

        self.assertRaises(exception.VolumeDeviceNotFound,
                          self.target.find_device)

        self.assertEqual(5, mock_get_devs.call_count)
        mock_get_devs.assert_has_calls(
            5 * [mock.call(only_live=True, get_one=True)]
        )

    @mock.patch.object(nvmeof.Target, 'get_devices')
    def test_find_device_first_found(self, mock_get_devs):
        """Returns the first device found."""
        mock_get_devs.return_value = ['/dev/nvme0n1']
        res = self.target.find_device()
        mock_get_devs.assert_called_once_with(only_live=True, get_one=True)
        self.assertEqual('/dev/nvme0n1', res)

    @mock.patch.object(nvmeof.Target, '_get_nvme_devices')
    def test_get_device_path_by_initial_devices(self, mock_get_devs):
        """There's a new device since we started, return it."""
        self.target.portals[0].controller = 'nvme0'
        self.target.portals[1].controller = 'nvme1'
        mock_get_devs.return_value = ['/dev/nvme0n1', '/dev/nvme0n2',
                                      '/dev/nvme1n2', '/dev/nvme2n1']
        self.target.devices_on_start = ['/dev/nvme0n1', '/dev/nvme1n2']

        res = self.target.get_device_path_by_initial_devices()

        mock_get_devs.assert_called_once_with()
        self.assertEqual('/dev/nvme0n2', res)

    @mock.patch.object(nvmeof.Target, '_get_nvme_devices')
    def test_get_device_path_by_initial_devices_not_found(self,
                                                          mock_get_devs):
        """There are no new devices since we started, return None."""
        self.target.portals[0].controller = 'nvme0'
        self.target.portals[1].controller = 'nvme1'
        mock_get_devs.return_value = ['/dev/nvme0n1', '/dev/nvme1n2']
        self.target.devices_on_start = ['/dev/nvme0n1', '/dev/nvme1n2']

        res = self.target.get_device_path_by_initial_devices()

        mock_get_devs.assert_called_once_with()
        self.assertIsNone(res)

    @mock.patch.object(nvmeof, 'blk_property')
    @mock.patch.object(nvmeof.Target, '_get_nvme_devices')
    def test_get_device_path_by_initial_devices_multiple(self, mock_get_devs,
                                                         mock_property):
        """There are multiple new devices, but they are the same volume."""
        self.target.portals[0].controller = 'nvme0'
        self.target.portals[1].controller = 'nvme1'
        mock_property.return_value = 'uuid'
        mock_get_devs.return_value = ['/dev/nvme0n1', '/dev/nvme0n2',
                                      '/dev/nvme1n1', '/dev/nvme1n2']
        self.target.devices_on_start = ['/dev/nvme0n1', '/dev/nvme1n1']

        res = self.target.get_device_path_by_initial_devices()

        mock_get_devs.assert_called_once_with()
        self.assertEqual(2, mock_property.call_count)
        mock_property.assert_has_calls([mock.call('uuid', 'nvme0n2'),
                                        mock.call('uuid', 'nvme1n2')],
                                       any_order=True)
        # The result is any of the 2 volumes, since they are the same
        self.assertIn(res, ['/dev/nvme0n2', '/dev/nvme1n2'])

    @mock.patch.object(nvmeof, 'blk_property')
    @mock.patch.object(nvmeof.Target, '_get_nvme_devices')
    def test_get_device_path_by_initial_devices_multiple_different(
            self, mock_get_devs, mock_property):
        """There are multiple new devices and they are different."""
        self.target.portals[0].controller = 'nvme0'
        self.target.portals[1].controller = 'nvme1'
        mock_property.side_effect = ('uuid1', 'uuid2')
        mock_get_devs.return_value = ['/dev/nvme0n1', '/dev/nvme0n2',
                                      '/dev/nvme1n1', '/dev/nvme1n2']
        self.target.devices_on_start = ['/dev/nvme0n1', '/dev/nvme1n1']

        res = self.target.get_device_path_by_initial_devices()

        mock_get_devs.assert_called_once_with()
        self.assertEqual(2, mock_property.call_count)
        mock_property.assert_has_calls([mock.call('uuid', 'nvme0n2'),
                                        mock.call('uuid', 'nvme1n2')],
                                       any_order=True)
        self.assertIsNone(res)


@ddt.ddt
class NVMeOFConnPropsTestCase(test_base.TestCase):

    @mock.patch.object(nvmeof.Target, 'factory')
    def test_init_old_props(self, mock_target):
        """Test init with old format connection properties."""
        conn_props = {'nqn': 'nqn_value',
                      'transport_type': 'rdma',
                      'target_portal': 'portal_value',
                      'target_port': 'port_value',
                      'volume_nguid': 'nguid',
                      'ns_id': 'nsid',
                      'host_nqn': 'host_nqn_value',
                      'qos_specs': None,
                      'access_mode': 'rw',
                      'encrypted': False,
                      'cacheable': True,
                      'discard': True}
        res = nvmeof.NVMeOFConnProps(conn_props,
                                     mock.sentinel.find_controllers)

        self.assertFalse(res.is_replicated)
        self.assertIsNone(res.qos_specs)
        self.assertFalse(res.readonly)
        self.assertFalse(res.encrypted)
        self.assertTrue(res.cacheable)
        self.assertTrue(res.discard)
        self.assertIsNone(res.alias)
        self.assertIsNone(res.cinder_volume_id)

        mock_target.assert_called_once_with(
            source_conn_props=res,
            find_controllers=mock.sentinel.find_controllers,
            volume_nguid='nguid',
            ns_id='nsid',
            host_nqn='host_nqn_value',
            portals=[('portal_value', 'port_value', 'rdma')],
            vol_uuid=None,
            target_nqn='nqn_value',
            # These parameters are not necessary for the Target, but for
            # convenience they are accepted and ignored.
            qos_specs=None,
            access_mode='rw',
            encrypted=False,
            cacheable=True,
            discard=True)

        self.assertListEqual([mock_target.return_value], res.targets)

    @ddt.data('vol_uuid', 'ns_id', 'volume_nguid')
    @mock.patch.object(nvmeof.Target, 'factory')
    def test_init_new_props_unreplicated(self, id_name, mock_target):
        """Test init with new format connection properties but no replicas."""
        conn_props = {'target_nqn': 'nqn_value',
                      id_name: 'uuid',
                      'portals': [('portal1', 'port_value', 'RoCEv2'),
                                  ('portal2', 'port_value', 'anything')],
                      'qos_specs': None,
                      'access_mode': 'rw',
                      'encrypted': False,
                      'cacheable': True,
                      'discard': True}
        res = nvmeof.NVMeOFConnProps(conn_props,
                                     mock.sentinel.find_controllers)

        self.assertFalse(res.is_replicated)
        self.assertIsNone(res.qos_specs)
        self.assertFalse(res.readonly)
        self.assertFalse(res.encrypted)
        self.assertTrue(res.cacheable)
        self.assertTrue(res.discard)
        self.assertIsNone(res.alias)
        self.assertIsNone(res.cinder_volume_id)

        kw_id_arg = {id_name: 'uuid'}
        mock_target.assert_called_once_with(
            source_conn_props=res,
            find_controllers=mock.sentinel.find_controllers,
            target_nqn='nqn_value',
            portals=[('portal1', 'port_value', 'RoCEv2'),
                     ('portal2', 'port_value', 'anything')],
            # These parameters are not necessary for the Target, but for
            # convenience they are accepted and ignored.
            qos_specs=None,
            access_mode='rw',
            encrypted=False,
            cacheable=True,
            discard=True,
            **kw_id_arg
        )

        self.assertListEqual([mock_target.return_value], res.targets)

    @mock.patch.object(nvmeof.Target, 'factory')
    def test_init_new_props_replicated(self, mock_target):
        """Test init with new format connection properties with replicas."""
        conn_props = {
            'vol_uuid': VOL_UUID_NO_HYPHENS,
            'alias': 'raid_alias',
            'replica_count': 2,
            'volume_replicas': [
                {'target_nqn': 'nqn1',
                 'vol_uuid': VOL_UUID1,
                 'portals': [['portal1', 'port_value', 'RoCEv2'],
                             ['portal2', 'port_value', 'anything']]},
                {'target_nqn': 'nqn2',
                 'vol_uuid': VOL_UUID2,
                 'portals': [['portal4', 'port_value', 'anything'],
                             ['portal3', 'port_value', 'RoCEv2']]}
            ],
            'qos_specs': None,
            'access_mode': 'ro',
            'encrypted': True,
            'cacheable': False,
            'discard': False
        }

        targets = [mock.Mock(), mock.Mock()]
        mock_target.side_effect = targets

        res = nvmeof.NVMeOFConnProps(conn_props,
                                     mock.sentinel.find_controllers)

        self.assertTrue(res.is_replicated)
        self.assertIsNone(res.qos_specs)
        self.assertTrue(res.readonly)
        self.assertTrue(res.encrypted)
        self.assertFalse(res.cacheable)
        self.assertFalse(res.discard)
        self.assertEqual('raid_alias', res.alias)
        self.assertEqual(VOL_UUID, res.cinder_volume_id)

        self.assertEqual(2, mock_target.call_count)
        call_1 = dict(source_conn_props=res,
                      find_controllers=mock.sentinel.find_controllers,
                      vol_uuid=VOL_UUID1,
                      target_nqn='nqn1',
                      portals=[['portal1', 'port_value', 'RoCEv2'],
                               ['portal2', 'port_value', 'anything']])
        call_2 = dict(source_conn_props=res,
                      find_controllers=mock.sentinel.find_controllers,
                      vol_uuid=VOL_UUID2,
                      target_nqn='nqn2',
                      portals=[['portal4', 'port_value', 'anything'],
                               ['portal3', 'port_value', 'RoCEv2']])
        mock_target.assert_has_calls([mock.call(**call_1),
                                      mock.call(**call_2)])

        self.assertListEqual(targets, res.targets)

    @mock.patch.object(nvmeof.Target, 'factory')
    def test_get_devices(self, mock_target):
        """Connector get devices gets devices from all its portals."""
        conn_props = {
            'vol_uuid': VOL_UUID,
            'alias': 'raid_alias',
            'replica_count': 2,
            'volume_replicas': [
                {'target_nqn': 'nqn1',
                 'vol_uuid': VOL_UUID1,
                 'portals': [['portal1', 'port_value', 'RoCEv2'],
                             ['portal2', 'port_value', 'anything']]},
{'target_nqn': 'nqn2', 'vol_uuid': VOL_UUID2, 'portals': [['portal4', 'port_value', 'anything'], ['portal3', 'port_value', 'RoCEv2']]} ], } targets = [mock.Mock(), mock.Mock()] targets[0].get_devices.return_value = [] targets[1].get_devices.return_value = ['/dev/nvme0n1', '/dev/nvme0n2'] mock_target.side_effect = targets conn_props_instance = nvmeof.NVMeOFConnProps(conn_props) res = conn_props_instance.get_devices(mock.sentinel.only_live) self.assertListEqual(['/dev/nvme0n1', '/dev/nvme0n2'], res) @mock.patch.object(nvmeof.Target, 'factory') def test_from_dictionary_parameter(self, mock_target): """Decorator converts dict into connection properties instance.""" class Connector(object): @nvmeof.NVMeOFConnProps.from_dictionary_parameter def connect_volume(my_self, connection_properties): self.assertIsInstance(connection_properties, nvmeof.NVMeOFConnProps) return 'result' conn = Connector() conn_props = {'target_nqn': 'nqn_value', 'vol_uuid': 'uuid', 'portals': [('portal1', 'port_value', 'RoCEv2'), ('portal2', 'port_value', 'anything')]} res = conn.connect_volume(conn_props) self.assertEqual('result', res) @ddt.ddt class NVMeOFConnectorTestCase(test_connector.ConnectorTestCase): """Test cases for NVMe initiator class.""" def setUp(self): super(NVMeOFConnectorTestCase, self).setUp() self.connector = nvmeof.NVMeOFConnector(None, execute=self.fake_execute, use_multipath=False) self.conn_props_dict = {'target_nqn': 'nqn_value', 'vol_uuid': VOL_UUID, 'portals': [('portal1', 'port1', 'RoCEv2'), ('portal2', 'port2', 'tcp'), ('portal3', 'port3', 'rdma')]} self.conn_props = nvmeof.NVMeOFConnProps(self.conn_props_dict) self.patch('oslo_concurrency.lockutils.external_lock') @mock.patch.object(priv_rootwrap, 'custom_execute', autospec=True) def test_nvme_present(self, mock_execute): nvme_present = self.connector.nvme_present() self.assertTrue(nvme_present) @ddt.data(OSError(2, 'FileNotFoundError'), Exception()) @mock.patch('os_brick.initiator.connectors.nvmeof.LOG') @mock.patch.object(priv_rootwrap, 'custom_execute', autospec=True) def test_nvme_present_exception(self, exc, mock_execute, mock_log): mock_execute.side_effect = exc nvme_present = self.connector.nvme_present() log = mock_log.debug if isinstance(exc, OSError) else mock_log.warning log.assert_called_once() self.assertFalse(nvme_present) @mock.patch.object(nvmeof.NVMeOFConnector, '_execute', autospec=True) def test_get_sysuuid_without_newline(self, mock_execute): mock_execute.side_effect = [ ("/dev/sda1", ""), ("9126E942-396D-11E7-B0B7-A81E84C186D1\n", "") ] uuid = self.connector._get_host_uuid() expected_uuid = "9126E942-396D-11E7-B0B7-A81E84C186D1" self.assertEqual(expected_uuid, uuid) @mock.patch.object(nvmeof.NVMeOFConnector, '_execute', autospec=True) def test_get_sysuuid_err(self, mock_execute): mock_execute.side_effect = putils.ProcessExecutionError() uuid = self.connector._get_host_uuid() self.assertIsNone(uuid) @mock.patch.object(nvmeof.NVMeOFConnector, '_execute', autospec=True) def test_get_sysuuid_overlay(self, mock_execute): mock_execute.return_value = ("overlay\n", "") uuid = self.connector._get_host_uuid() self.assertIsNone(uuid) @mock.patch.object(utils, 'get_nvme_host_id', return_value=SYS_UUID) @mock.patch.object(nvmeof.NVMeOFConnector, '_is_native_multipath_supported', return_value=True) @mock.patch.object(nvmeof.NVMeOFConnector, 'nvme_present', return_value=True) @mock.patch.object(utils, 'get_host_nqn', return_value='fakenqn') @mock.patch.object(priv_nvmeof, 'get_system_uuid', return_value=None)
@mock.patch.object(nvmeof.NVMeOFConnector, '_get_host_uuid', return_value=None) def test_get_connector_properties_without_sysuuid(self, mock_host_uuid, mock_sysuuid, mock_nqn, mock_nvme_present, mock_nat_mpath_support, mock_get_host_id): props = self.connector.get_connector_properties('sudo') expected_props = {'nqn': 'fakenqn', 'nvme_native_multipath': False, 'nvme_hostid': SYS_UUID} self.assertEqual(expected_props, props) mock_get_host_id.assert_called_once_with(None) mock_nqn.assert_called_once_with(None) @mock.patch.object(utils, 'get_nvme_host_id', return_value=SYS_UUID) @mock.patch.object(nvmeof.NVMeOFConnector, '_is_native_multipath_supported', return_value=True) @mock.patch.object(nvmeof.NVMeOFConnector, 'nvme_present') @mock.patch.object(utils, 'get_host_nqn', autospec=True) @mock.patch.object(priv_nvmeof, 'get_system_uuid', autospec=True) @mock.patch.object(nvmeof.NVMeOFConnector, '_get_host_uuid', autospec=True) def test_get_connector_properties_with_sysuuid(self, mock_host_uuid, mock_sysuuid, mock_nqn, mock_nvme_present, mock_native_mpath_support, mock_get_host_id): mock_host_uuid.return_value = HOST_UUID mock_sysuuid.return_value = SYS_UUID mock_nqn.return_value = HOST_NQN mock_nvme_present.return_value = True props = self.connector.get_connector_properties('sudo') expected_props = {"system uuid": SYS_UUID, "nqn": HOST_NQN, "uuid": HOST_UUID, 'nvme_native_multipath': False, 'nvme_hostid': SYS_UUID} self.assertEqual(expected_props, props) mock_get_host_id.assert_called_once_with(SYS_UUID) mock_nqn.assert_called_once_with(SYS_UUID) def test_get_volume_paths_device_info(self): """Device info path has highest priority.""" dev_path = '/dev/nvme0n1' device_info = {'type': 'block', 'path': dev_path} conn_props = connection_properties.copy() conn_props['device_path'] = 'lower_priority' conn_props = nvmeof.NVMeOFConnProps(conn_props) res = self.connector.get_volume_paths(conn_props, device_info) self.assertEqual([dev_path], res) def test_get_volume_paths_nova_conn_props(self): """Second highest priority is device_path nova puts in conn props.""" dev_path = '/dev/nvme0n1' device_info = None conn_props = connection_properties.copy() conn_props['device_path'] = dev_path conn_props = nvmeof.NVMeOFConnProps(conn_props) res = self.connector.get_volume_paths(conn_props, device_info) self.assertEqual([dev_path], res) @mock.patch.object(nvmeof.NVMeOFConnector, '_is_raid_device') @mock.patch.object(nvmeof.NVMeOFConnProps, 'get_devices') def test_get_volume_paths_unreplicated(self, mock_get_devs, mock_is_raid): """Search for device from unreplicated connection properties.""" mock_get_devs.return_value = ['/dev/nvme0n1'] conn_props = nvmeof.NVMeOFConnProps(volume_replicas[0]) res = self.connector.get_volume_paths(conn_props, None) self.assertEqual(mock_get_devs.return_value, res) mock_is_raid.assert_not_called() mock_get_devs.assert_called_once_with() @mock.patch.object(nvmeof.NVMeOFConnector, '_is_raid_device') @mock.patch.object(nvmeof.NVMeOFConnProps, 'get_devices') def test_get_volume_paths_single_replica(self, mock_get_devs, mock_is_raid): """Search for device from replicated conn props with 1 replica.""" dev_path = '/dev/nvme1n1' mock_get_devs.return_value = [dev_path] target_props = volume_replicas[0] connection_properties = { 'vol_uuid': VOL_UUID, 'alias': 'fakealias', 'volume_replicas': [target_props], 'replica_count': 1 } conn_props = nvmeof.NVMeOFConnProps(connection_properties) res = self.connector.get_volume_paths(conn_props, None) self.assertEqual(['/dev/md/fakealias'], res) 
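# Note: _is_raid_device is patched without a return value, so the mock it returns is truthy; the lone replica is therefore treated as an already-assembled md member and the alias path is reported instead of the raw namespace device.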
mock_is_raid.assert_called_once_with(dev_path) mock_get_devs.assert_called_once_with() @mock.patch.object(nvmeof.NVMeOFConnector, '_is_raid_device') @mock.patch.object(nvmeof.NVMeOFConnProps, 'get_devices') def test_get_volume_paths_single_replica_not_replicated( self, mock_get_devs, mock_is_raid): """Search for device from unreplicated conn props with 1 replica.""" mock_is_raid.return_value = False dev_path = '/dev/nvme1n1' mock_get_devs.return_value = [dev_path] target_props = volume_replicas[0] connection_properties = { 'vol_uuid': VOL_UUID, 'alias': 'fakealias', 'volume_replicas': [target_props], 'replica_count': 1 } conn_props = nvmeof.NVMeOFConnProps(connection_properties) res = self.connector.get_volume_paths(conn_props, None) self.assertEqual([dev_path], res) mock_is_raid.assert_called_once_with(dev_path) mock_get_devs.assert_called_once_with() def test_get_volume_paths_replicated(self): """Search for device from replicated conn props with >1 replica.""" conn_props = nvmeof.NVMeOFConnProps(connection_properties) self.assertEqual(['/dev/md/fakealias'], self.connector.get_volume_paths(conn_props)) @mock.patch.object(nvmeof.Target, 'set_portals_controllers', mock.Mock()) @mock.patch.object(nvmeof.NVMeOFConnector, '_try_disconnect_all') @mock.patch.object(nvmeof.NVMeOFConnector, '_connect_target') def test_connect_volume_not_replicated( self, mock_connect_target, mock_disconnect): """Single vol attach.""" connection_properties = volume_replicas[0].copy() mock_connect_target.return_value = '/dev/nvme0n1' self.assertEqual({'type': 'block', 'path': '/dev/nvme0n1'}, self.connector.connect_volume(connection_properties)) mock_connect_target.assert_called_with(mock.ANY) self.assertIsInstance(mock_connect_target.call_args[0][0], nvmeof.Target) mock_disconnect.assert_not_called() @mock.patch.object(nvmeof.Target, 'set_portals_controllers', mock.Mock()) @mock.patch.object(nvmeof.NVMeOFConnector, '_try_disconnect_all') @mock.patch.object(nvmeof.NVMeOFConnector, '_connect_target') def test_connect_volume_not_replicated_fails( self, mock_connect_target, mock_disconnect): """Single vol attach fails and disconnects on failure.""" connection_properties = volume_replicas[0].copy() mock_connect_target.side_effect = exception.VolumeDeviceNotFound, self.assertRaises(exception.VolumeDeviceNotFound, self.connector.connect_volume, connection_properties) mock_connect_target.assert_called_with(mock.ANY) self.assertIsInstance(mock_connect_target.call_args[0][0], nvmeof.Target) mock_disconnect.assert_called_with(mock.ANY) self.assertIsInstance(mock_disconnect.call_args[0][0], nvmeof.NVMeOFConnProps) @mock.patch.object(nvmeof.Target, 'set_portals_controllers', mock.Mock()) @mock.patch.object(nvmeof.NVMeOFConnector, '_try_disconnect_all') @mock.patch.object(nvmeof.NVMeOFConnector, '_connect_volume_replicated') @mock.patch.object(nvmeof.NVMeOFConnector, '_connect_target') def test_connect_volume_replicated( self, mock_connect_target, mock_replicated_volume, mock_disconnect): mock_replicated_volume.return_value = '/dev/md/md1' actual = self.connector.connect_volume(connection_properties) expected = {'type': 'block', 'path': '/dev/md/md1'} self.assertEqual(expected, actual) mock_replicated_volume.assert_called_once_with(mock.ANY) self.assertIsInstance(mock_replicated_volume.call_args[0][0], nvmeof.NVMeOFConnProps) mock_connect_target.assert_not_called() mock_disconnect.assert_not_called() @mock.patch.object(nvmeof.Target, 'set_portals_controllers', mock.Mock()) @mock.patch.object(nvmeof.NVMeOFConnector, 
'_try_disconnect_all') @mock.patch.object(nvmeof.NVMeOFConnector, '_handle_replicated_volume') @mock.patch.object(nvmeof.NVMeOFConnector, '_connect_target') def test_connect_volume_replicated_exception( self, mock_connect_target, mock_replicated_volume, mock_disconnect): mock_connect_target.side_effect = Exception() self.assertRaises(exception.VolumeDeviceNotFound, self.connector.connect_volume, connection_properties) mock_disconnect.assert_called_with(mock.ANY) self.assertIsInstance(mock_disconnect.call_args[0][0], nvmeof.NVMeOFConnProps) @mock.patch.object(nvmeof.NVMeOFConnector, '_try_disconnect_all') @mock.patch.object(nvmeof.NVMeOFConnector, 'get_volume_paths') @mock.patch('os.path.exists', return_value=True) def test_disconnect_volume_path_not_found( self, mock_exists, mock_get_paths, mock_disconnect): """Disconnect can't find device path from conn props and dev info.""" mock_get_paths.return_value = [] res = self.connector.disconnect_volume(connection_properties, mock.sentinel.device_info) self.assertIsNone(res) mock_get_paths.assert_called_once_with(mock.ANY, mock.sentinel.device_info) self.assertIsInstance(mock_get_paths.call_args[0][0], nvmeof.NVMeOFConnProps) mock_exists.assert_not_called() mock_disconnect.assert_not_called() @mock.patch.object(nvmeof.NVMeOFConnector, 'get_volume_paths') @mock.patch('os.path.exists', return_value=True) def test_disconnect_volume_path_doesnt_exist( self, mock_exists, mock_get_paths): """Disconnect path doesn't exist""" dev_path = '/dev/nvme0n1' mock_get_paths.return_value = [dev_path] mock_exists.return_value = False res = self.connector.disconnect_volume(connection_properties, mock.sentinel.device_info) self.assertIsNone(res) mock_get_paths.assert_called_once_with(mock.ANY, mock.sentinel.device_info) self.assertIsInstance(mock_get_paths.call_args[0][0], nvmeof.NVMeOFConnProps) mock_exists.assert_called_once_with(dev_path) @mock.patch.object(nvmeof.Target, 'set_portals_controllers', mock.Mock()) @mock.patch('os_brick.initiator.linuxscsi.LinuxSCSI.flush_device_io') @mock.patch.object(nvmeof.NVMeOFConnector, 'get_volume_paths') @mock.patch.object(nvmeof.NVMeOFConnector, 'end_raid') @mock.patch('os.path.exists', return_value=True) def test_disconnect_volume_unreplicated( self, mock_exists, mock_end_raid, mock_get_paths, mock_flush): """Disconnect a single device.""" dev_path = '/dev/nvme0n1' mock_get_paths.return_value = [dev_path] self.connector.disconnect_volume(connection_properties, mock.sentinel.device_info, ignore_errors=True) mock_get_paths.assert_called_once_with(mock.ANY, mock.sentinel.device_info) self.assertIsInstance(mock_get_paths.call_args[0][0], nvmeof.NVMeOFConnProps) mock_exists.assert_called_once_with(dev_path) mock_end_raid.assert_not_called() mock_flush.assert_called_with(dev_path) @mock.patch.object(nvmeof.Target, 'set_portals_controllers', mock.Mock()) @mock.patch('os_brick.initiator.linuxscsi.LinuxSCSI.flush_device_io') @mock.patch.object(nvmeof.NVMeOFConnector, 'get_volume_paths') @mock.patch.object(nvmeof.NVMeOFConnector, 'end_raid') @mock.patch('os.path.exists', return_value=True) def test_disconnect_volume_replicated( self, mock_exists, mock_end_raid, mock_get_paths, mock_flush): """Disconnect a raid.""" raid_path = '/dev/md/md1' mock_get_paths.return_value = [raid_path] self.connector.disconnect_volume(connection_properties, mock.sentinel.device_info, ignore_errors=True) mock_get_paths.assert_called_once_with(mock.ANY, mock.sentinel.device_info) self.assertIsInstance(mock_get_paths.call_args[0][0], nvmeof.NVMeOFConnProps) 
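# Note: a RAID path is torn down through end_raid; unlike the unreplicated case above, flush_device_io is never invoked on the md device.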
mock_exists.assert_called_once_with(raid_path) mock_end_raid.assert_called_with(raid_path) mock_flush.assert_not_called() def test__get_sizes_from_lba(self): """Get nsze and new size using nvme LBA information.""" nsze = 6291456 ns_data = {"nsze": nsze, "ncap": nsze, "nuse": nsze, "lbafs": [{"ms": 0, "ds": 9, "rp": 0}]} res_nsze, res_size = self.connector._get_sizes_from_lba(ns_data) self.assertEqual(nsze, res_nsze) self.assertEqual(nsze * 1 << 9, res_size) @ddt.data([{"ms": 0, "ds": 6, "rp": 0}], [{"ms": 0, "ds": 9, "rp": 0}, {"ms": 0, "ds": 9, "rp": 0}]) def test__get_sizes_from_lba_error(self, lbafs): """Incorrect data returned in LBA information.""" nsze = 6291456 ns_data = {"nsze": nsze, "ncap": nsze, "nuse": nsze, "lbafs": lbafs} res_nsze, res_size = self.connector._get_sizes_from_lba(ns_data) self.assertIsNone(res_nsze) self.assertIsNone(res_size) @mock.patch.object(nvmeof, 'blk_property') @mock.patch.object(nvmeof.NVMeOFConnector, '_get_sizes_from_lba') @mock.patch.object(nvmeof.NVMeOFConnector, '_execute') @mock.patch.object(nvmeof.NVMeOFConnector, 'get_volume_paths') @mock.patch('os_brick.utils.get_device_size') def test_extend_volume_unreplicated(self, mock_device_size, mock_paths, mock_exec, mock_lba, mock_property): """Uses nvme to get expected size and waits until sysfs shows it.""" new_size = 3221225472 new_nsze = int(new_size / 512) # nsze is size / block-size old_nsze = int(new_nsze / 2) dev_path = '/dev/nvme0n1' mock_paths.return_value = [dev_path] stdout = '{"data": "jsondata"}' mock_exec.return_value = (stdout, '') mock_lba.return_value = (new_nsze, new_size) # Simulate a delay before the new value is present in sysfs mock_property.side_effect = (str(old_nsze), str(new_nsze)) self.assertEqual(new_size, self.connector.extend_volume(connection_properties)) mock_paths.assert_called_with(mock.ANY) self.assertIsInstance(mock_paths.call_args[0][0], nvmeof.NVMeOFConnProps) mock_exec.assert_called_once_with( 'nvme', 'id-ns', '-ojson', dev_path, run_as_root=True, root_helper=self.connector._root_helper) mock_lba.assert_called_once_with({"data": "jsondata"}) self.assertEqual(2, mock_property.call_count) mock_property.assert_has_calls([mock.call('size', 'nvme0n1'), mock.call('size', 'nvme0n1')]) mock_device_size.assert_not_called() @mock.patch.object(nvmeof.NVMeOFConnector, 'rescan') @mock.patch.object(nvmeof, 'blk_property') @mock.patch.object(nvmeof.NVMeOFConnector, '_get_sizes_from_lba') @mock.patch.object(nvmeof.NVMeOFConnector, '_execute') @mock.patch.object(nvmeof.NVMeOFConnector, 'get_volume_paths') @mock.patch('os_brick.utils.get_device_size') def test_extend_volume_unreplicated_nvme_fails( self, mock_device_size, mock_paths, mock_exec, mock_lba, mock_property, mock_rescan): """nvme command fails, so it rescans, waits, and reads size.""" dev_path = '/dev/nvme0n1' mock_device_size.return_value = 100 mock_paths.return_value = [dev_path] mock_exec.side_effect = putils.ProcessExecutionError() self.assertEqual(100, self.connector.extend_volume(connection_properties)) mock_paths.assert_called_with(mock.ANY) self.assertIsInstance(mock_paths.call_args[0][0], nvmeof.NVMeOFConnProps) mock_exec.assert_called_once_with( 'nvme', 'id-ns', '-ojson', dev_path, run_as_root=True, root_helper=self.connector._root_helper) mock_lba.assert_not_called() mock_property.assert_not_called() mock_rescan.assert_called_once_with('nvme0') mock_device_size.assert_called_with(self.connector, '/dev/nvme0n1') @mock.patch.object(nvmeof.NVMeOFConnector, 'get_volume_paths') 
@mock.patch.object(nvmeof.NVMeOFConnector, 'run_mdadm') @mock.patch('os_brick.utils.get_device_size') def test_extend_volume_replicated( self, mock_device_size, mock_mdadm, mock_paths): device_path = '/dev/md/' + connection_properties['alias'] mock_paths.return_value = [device_path] mock_device_size.return_value = 100 self.assertEqual( 100, self.connector.extend_volume(connection_properties)) mock_paths.assert_called_once_with(mock.ANY) self.assertIsInstance(mock_paths.call_args[0][0], nvmeof.NVMeOFConnProps) mock_mdadm.assert_called_with( ('mdadm', '--grow', '--size', 'max', device_path)) mock_device_size.assert_called_with(self.connector, device_path) @mock.patch.object(nvmeof.Target, 'find_device') @mock.patch.object(nvmeof.Target, 'set_portals_controllers') @mock.patch.object(nvmeof.NVMeOFConnector, 'run_nvme_cli') @mock.patch.object(nvmeof.NVMeOFConnector, 'rescan') @mock.patch.object(nvmeof.Portal, 'state', new_callable=mock.PropertyMock) def test__connect_target_with_connected_device( self, mock_state, mock_rescan, mock_cli, mock_set_ctrls, mock_find_dev): """Test connect target when there's a connection to the subsystem.""" self.conn_props.targets[0].portals[-1].controller = 'nvme0' mock_state.side_effect = ('connecting', None, 'live') dev_path = '/dev/nvme0n1' mock_find_dev.return_value = dev_path res = self.connector._connect_target(self.conn_props.targets[0]) self.assertEqual(dev_path, res) self.assertEqual(3, mock_state.call_count) mock_state.assert_has_calls(3 * [mock.call()]) mock_rescan.assert_called_once_with('nvme0') mock_set_ctrls.assert_called_once_with() mock_find_dev.assert_called_once_with() mock_cli.assert_not_called() @ddt.data(True, False) @mock.patch.object(nvmeof.Target, 'find_device') @mock.patch.object(nvmeof.Target, 'set_portals_controllers') @mock.patch.object(nvmeof.NVMeOFConnector, '_do_multipath') @mock.patch.object(nvmeof.NVMeOFConnector, 'run_nvme_cli') @mock.patch.object(nvmeof.NVMeOFConnector, 'rescan') @mock.patch.object(nvmeof.Portal, 'state', new_callable=mock.PropertyMock) def test__connect_target_not_found(self, do_multipath, mock_state, mock_rescan, mock_cli, doing_multipath, mock_set_ctrls, mock_find_dev): """Test connect target fails to find device after connecting.""" self.conn_props.targets[0].portals[-1].controller = 'nvme0' doing_multipath.return_value = do_multipath retries = 3 mock_state.side_effect = retries * ['connecting', None, 'live'] mock_find_dev.side_effect = exception.VolumeDeviceNotFound() self.assertRaises(exception.VolumeDeviceNotFound, self.connector._connect_target, self.conn_props.targets[0]) self.assertEqual(retries * 3, mock_state.call_count) mock_state.assert_has_calls(retries * 3 * [mock.call()]) self.assertEqual(retries, mock_rescan.call_count) mock_rescan.assert_has_calls(retries * [mock.call('nvme0')]) self.assertEqual(retries, mock_set_ctrls.call_count) mock_set_ctrls.assert_has_calls(retries * [mock.call()]) self.assertEqual(retries, mock_find_dev.call_count) mock_find_dev.assert_has_calls(retries * [mock.call()]) if do_multipath: self.assertEqual(retries, mock_cli.call_count) mock_cli.assert_has_calls( retries * [mock.call(['connect', '-a', 'portal2', '-s', 'port2', '-t', 'tcp', '-n', 'nqn_value', '-Q', '128', '-l', '-1'])]) else: mock_cli.assert_not_called() @mock.patch('time.time', side_effect=[0, 1, 20] * 3) @mock.patch.object(nvmeof.Portal, 'reconnect_delay', new_callable=mock.PropertyMock, return_value=10) @mock.patch.object(nvmeof.Portal, 'is_live', new_callable=mock.PropertyMock, return_value=False) 
@mock.patch.object(nvmeof.Target, 'find_device') @mock.patch.object(nvmeof.Target, 'set_portals_controllers') @mock.patch.object(nvmeof.NVMeOFConnector, 'run_nvme_cli') @mock.patch.object(nvmeof.NVMeOFConnector, 'rescan') @mock.patch.object(nvmeof.Portal, 'state', new_callable=mock.PropertyMock) def test__connect_target_portals_down( self, mock_state, mock_rescan, mock_cli, mock_set_ctrls, mock_find_dev, mock_is_live, mock_delay, mock_time): """Test connect target has all portal connections down.""" retries = 3 mock_state.side_effect = retries * 3 * ['connecting'] self.assertRaises(exception.VolumeDeviceNotFound, self.connector._connect_target, self.conn_props.targets[0]) self.assertEqual(retries * 3, mock_state.call_count) self.assertEqual(retries * 3, mock_is_live.call_count) self.assertEqual(retries * 3, mock_delay.call_count) mock_state.assert_has_calls(retries * 3 * [mock.call()]) mock_rescan.assert_not_called() mock_set_ctrls.assert_not_called() mock_find_dev.assert_not_called() mock_cli.assert_not_called() @mock.patch('time.time', side_effect=[0, 1, 20] * 3) @mock.patch.object(nvmeof.Portal, 'reconnect_delay', new_callable=mock.PropertyMock, return_value=10) @mock.patch.object(nvmeof.Portal, 'is_live', new_callable=mock.PropertyMock, return_value=False) @mock.patch.object(nvmeof.LOG, 'error') @mock.patch.object(nvmeof.Target, 'find_device') @mock.patch.object(nvmeof.Target, 'set_portals_controllers') @mock.patch.object(nvmeof.NVMeOFConnector, 'run_nvme_cli') @mock.patch.object(nvmeof.NVMeOFConnector, 'rescan') @mock.patch.object(nvmeof.Portal, 'state', new_callable=mock.PropertyMock) def test__connect_target_no_portals_connect( self, mock_state, mock_rescan, mock_cli, mock_set_ctrls, mock_find_dev, mock_log, mock_is_live, mock_delay, mock_time): """Test connect target when fails to connect to any portal.""" retries = 3 mock_state.side_effect = retries * ['connecting', 'connecting', None] mock_cli.side_effect = putils.ProcessExecutionError() target = self.conn_props.targets[0] self.assertRaises(exception.VolumeDeviceNotFound, self.connector._connect_target, target) self.assertEqual(retries, mock_log.call_count) self.assertEqual(retries * 3, mock_state.call_count) mock_state.assert_has_calls(retries * 3 * [mock.call()]) mock_rescan.assert_not_called() mock_set_ctrls.assert_not_called() mock_find_dev.assert_not_called() self.assertEqual(3, mock_cli.call_count) portal = target.portals[-1] mock_cli.assert_has_calls( retries * [mock.call(['connect', '-a', portal.address, '-s', portal.port, '-t', portal.transport, '-n', target.nqn, '-Q', '128', '-l', '-1'])]) # There are 2 in connecting state self.assertEqual(retries * 2, mock_is_live.call_count) self.assertEqual(retries * 2, mock_delay.call_count) @mock.patch.object(nvmeof.Target, 'find_device') @mock.patch.object(nvmeof.Target, 'set_portals_controllers') @mock.patch.object(nvmeof.NVMeOFConnector, 'run_nvme_cli') @mock.patch.object(nvmeof.NVMeOFConnector, 'rescan') @mock.patch.object(nvmeof.Portal, 'state', new_callable=mock.PropertyMock) def test__connect_target_new_device_path( self, mock_state, mock_rescan, mock_cli, mock_set_ctrls, mock_find_dev): """Test connect when we do a new connection and find the device.""" mock_state.side_effect = ['connecting', 'connecting', None] dev_path = '/dev/nvme0n1' mock_find_dev.return_value = dev_path target = self.conn_props.targets[0] target.host_nqn = 'host_nqn' res = self.connector._connect_target(target) self.assertEqual(dev_path, res) self.assertEqual(3, mock_state.call_count) 
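# Note: state is queried once per portal; the two portals stuck in 'connecting' are passed over, and the third (state None, i.e. no controller yet) is the one through which the fresh nvme connect asserted below is issued.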
mock_state.assert_has_calls(3 * [mock.call()]) mock_rescan.assert_not_called() mock_set_ctrls.assert_called_once_with() mock_find_dev.assert_called_once_with() portal = target.portals[-1] mock_cli.assert_called_once_with([ 'connect', '-a', portal.address, '-s', portal.port, '-t', portal.transport, '-n', target.nqn, '-Q', '128', '-l', '-1', '-q', 'host_nqn']) @mock.patch.object(nvmeof.NVMeOFConnector, '_do_multipath', mock.Mock(return_value=True)) @mock.patch.object(nvmeof.Target, 'find_device') @mock.patch.object(nvmeof.Target, 'set_portals_controllers') @mock.patch.object(nvmeof.NVMeOFConnector, 'run_nvme_cli') @mock.patch.object(nvmeof.NVMeOFConnector, 'rescan') @mock.patch.object(nvmeof.Portal, 'state', new_callable=mock.PropertyMock) def test__connect_target_multipath( self, mock_state, mock_rescan, mock_cli, mock_set_ctrls, mock_find_dev): """Test connect when we do a new connection and find the device.""" target = self.conn_props.targets[0] mock_state.side_effect = [None, None, None] dev_path = '/dev/nvme0n1' mock_find_dev.return_value = dev_path res = self.connector._connect_target(target) self.assertEqual(dev_path, res) self.assertEqual(3, mock_state.call_count) mock_state.assert_has_calls(3 * [mock.call()]) mock_rescan.assert_not_called() mock_set_ctrls.assert_called_once_with() mock_find_dev.assert_called_once_with() self.assertEqual(len(target.portals), mock_cli.call_count) mock_cli.assert_has_calls( [mock.call(['connect', '-a', portal.address, '-s', portal.port, '-t', portal.transport, '-n', target.nqn, '-Q', '128', '-l', '-1']) for portal in target.portals]) @ddt.data((70, '', ''), (errno.EALREADY, '', ''), (1, '', 'already connected'), (1, 'already connected', '')) @ddt.unpack @mock.patch.object(nvmeof.LOG, 'warning') @mock.patch.object(nvmeof.Target, 'find_device') @mock.patch.object(nvmeof.Target, 'set_portals_controllers') @mock.patch.object(nvmeof.NVMeOFConnector, 'run_nvme_cli') @mock.patch.object(nvmeof.NVMeOFConnector, 'rescan') @mock.patch.object(nvmeof.Portal, 'state', new_callable=mock.PropertyMock) def test__connect_target_race( self, exit_code, stdout, stderr, mock_state, mock_rescan, mock_cli, mock_set_ctrls, mock_find_dev, mock_log): """Treat race condition with sysadmin as success.""" mock_state.side_effect = ['connecting', 'connecting', None, 'live'] dev_path = '/dev/nvme0n1' mock_find_dev.return_value = dev_path mock_cli.side_effect = putils.ProcessExecutionError( exit_code=exit_code, stdout=stdout, stderr=stderr) target = self.conn_props.targets[0] res = self.connector._connect_target(target) self.assertEqual(dev_path, res) self.assertEqual(4, mock_state.call_count) mock_state.assert_has_calls(4 * [mock.call()]) mock_rescan.assert_not_called() mock_set_ctrls.assert_called_once_with() mock_find_dev.assert_called_once_with() portal = target.portals[-1] mock_cli.assert_called_once_with([ 'connect', '-a', portal.address, '-s', portal.port, '-t', portal.transport, '-n', target.nqn, '-Q', '128', '-l', '-1']) self.assertEqual(1, mock_log.call_count) @ddt.data((70, '', ''), (errno.EALREADY, '', ''), (1, '', 'already connected'), (1, 'already connected', '')) @ddt.unpack @mock.patch.object(nvmeof.LOG, 'warning') @mock.patch('time.sleep') @mock.patch('time.time', side_effect=[0, 0.1, 0.6]) @mock.patch.object(nvmeof.Portal, 'reconnect_delay', new_callable=mock.PropertyMock, return_value=10) @mock.patch.object(nvmeof.Portal, 'is_live', new_callable=mock.PropertyMock) @mock.patch.object(nvmeof.Target, 'find_device') @mock.patch.object(nvmeof.Target, 
'set_portals_controllers') @mock.patch.object(nvmeof.NVMeOFConnector, 'run_nvme_cli') @mock.patch.object(nvmeof.NVMeOFConnector, 'rescan') @mock.patch.object(nvmeof.Portal, 'state', new_callable=mock.PropertyMock) def test__connect_target_race_connecting( self, exit_code, stdout, stderr, mock_state, mock_rescan, mock_cli, mock_set_ctrls, mock_find_dev, mock_is_live, mock_delay, mock_time, mock_sleep, mock_log): """Test connect target when portal is reconnecting after race.""" mock_cli.side_effect = putils.ProcessExecutionError( exit_code=exit_code, stdout=stdout, stderr=stderr) mock_state.side_effect = ['connecting', 'connecting', None, 'connecting'] mock_is_live.side_effect = [False, False, False, False, True] target = self.conn_props.targets[0] res = self.connector._connect_target(target) self.assertEqual(mock_find_dev.return_value, res) self.assertEqual(4, mock_state.call_count) self.assertEqual(5, mock_is_live.call_count) self.assertEqual(3, mock_delay.call_count) self.assertEqual(2, mock_sleep.call_count) mock_sleep.assert_has_calls(2 * [mock.call(1)]) mock_rescan.assert_not_called() mock_set_ctrls.assert_called_once() mock_find_dev.assert_called_once() portal = target.portals[-1] mock_cli.assert_called_once_with([ 'connect', '-a', portal.address, '-s', portal.port, '-t', portal.transport, '-n', target.nqn, '-Q', '128', '-l', '-1']) self.assertEqual(1, mock_log.call_count) @ddt.data((70, '', ''), (errno.EALREADY, '', ''), (1, '', 'already connected'), (1, 'already connected', '')) @ddt.unpack @mock.patch.object(nvmeof.LOG, 'warning') @mock.patch.object(nvmeof.LOG, 'error') @mock.patch('time.sleep') @mock.patch('time.time', side_effect=[0, 0.1, 0.6]) @mock.patch.object(nvmeof.Portal, 'reconnect_delay', new_callable=mock.PropertyMock, return_value=10) @mock.patch.object(nvmeof.Portal, 'is_live', new_callable=mock.PropertyMock) @mock.patch.object(nvmeof.Target, 'find_device') @mock.patch.object(nvmeof.Target, 'set_portals_controllers') @mock.patch.object(nvmeof.NVMeOFConnector, 'run_nvme_cli') @mock.patch.object(nvmeof.NVMeOFConnector, 'rescan') @mock.patch.object(nvmeof.Portal, 'state', new_callable=mock.PropertyMock) def test__connect_target_race_unknown( self, exit_code, stdout, stderr, mock_state, mock_rescan, mock_cli, mock_set_ctrls, mock_find_dev, mock_is_live, mock_delay, mock_time, mock_sleep, mock_log_err, mock_log_warn): """Test connect target when portal is unknown after race.""" mock_cli.side_effect = putils.ProcessExecutionError( exit_code=exit_code, stdout=stdout, stderr=stderr) mock_state.side_effect = ['connecting', 'connecting', None, 'unknown'] mock_is_live.side_effect = [False, False, False, True] target = self.conn_props.targets[0] res = self.connector._connect_target(target) self.assertEqual(mock_find_dev.return_value, res) self.assertEqual(4, mock_state.call_count) self.assertEqual(4, mock_is_live.call_count) self.assertEqual(2, mock_delay.call_count) self.assertEqual(2, mock_sleep.call_count) mock_sleep.assert_has_calls(2 * [mock.call(1)]) mock_rescan.assert_not_called() mock_set_ctrls.assert_called_once() mock_find_dev.assert_called_once() portal = target.portals[-1] mock_cli.assert_called_once_with([ 'connect', '-a', portal.address, '-s', portal.port, '-t', portal.transport, '-n', target.nqn, '-Q', '128', '-l', '-1']) self.assertEqual(1, mock_log_err.call_count) self.assertEqual(1, mock_log_warn.call_count) @mock.patch('time.sleep') @mock.patch('time.time', side_effect=[0, 0.1, 0.6]) @mock.patch.object(nvmeof.Portal, 'reconnect_delay', 
new_callable=mock.PropertyMock, return_value=10) @mock.patch.object(nvmeof.Portal, 'is_live', new_callable=mock.PropertyMock) @mock.patch.object(nvmeof.Target, 'find_device') @mock.patch.object(nvmeof.Target, 'set_portals_controllers') @mock.patch.object(nvmeof.NVMeOFConnector, 'run_nvme_cli') @mock.patch.object(nvmeof.NVMeOFConnector, 'rescan') @mock.patch.object(nvmeof.Portal, 'state', new_callable=mock.PropertyMock, return_value='connecting') def test__connect_target_portals_connecting( self, mock_state, mock_rescan, mock_cli, mock_set_ctrls, mock_find_dev, mock_is_live, mock_delay, mock_time, mock_sleep): """Test connect target when portals reconnect.""" # First pass everything connecting, second pass the second portal is up mock_is_live.side_effect = [False, False, False, False, True] # Connecting state changing after 2 sleeps target = self.conn_props.targets[0] res = self.connector._connect_target(target) self.assertEqual(mock_find_dev.return_value, res) self.assertEqual(3, mock_state.call_count) self.assertEqual(5, mock_is_live.call_count) self.assertEqual(3, mock_delay.call_count) self.assertEqual(2, mock_sleep.call_count) mock_sleep.assert_has_calls(2 * [mock.call(1)]) mock_rescan.assert_not_called() mock_set_ctrls.assert_called_once() mock_find_dev.assert_called_once() mock_cli.assert_not_called() @mock.patch.object(nvmeof.NVMeOFConnector, 'stop_and_assemble_raid') @mock.patch.object(nvmeof.NVMeOFConnector, '_is_device_in_raid') def test_handle_replicated_volume_existing( self, mock_device_raid, mock_stop_assemble_raid): mock_device_raid.return_value = True conn_props = nvmeof.NVMeOFConnProps(connection_properties) result = self.connector._handle_replicated_volume( ['/dev/nvme1n1', '/dev/nvme1n2', '/dev/nvme1n3'], conn_props) self.assertEqual('/dev/md/fakealias', result) mock_device_raid.assert_called_with('/dev/nvme1n1') mock_stop_assemble_raid.assert_called_with( ['/dev/nvme1n1', '/dev/nvme1n2', '/dev/nvme1n3'], '/dev/md/fakealias', False) @mock.patch.object(nvmeof.NVMeOFConnector, '_is_device_in_raid') def test_handle_replicated_volume_not_found(self, mock_device_raid): mock_device_raid.return_value = False conn_props = nvmeof.NVMeOFConnProps(connection_properties) conn_props.replica_count = 4 self.assertRaises(exception.VolumeDeviceNotFound, self.connector._handle_replicated_volume, ['/dev/nvme1n1', '/dev/nvme1n2', '/dev/nvme1n3'], conn_props) mock_device_raid.assert_any_call('/dev/nvme1n1') mock_device_raid.assert_any_call('/dev/nvme1n2') mock_device_raid.assert_any_call('/dev/nvme1n3') @mock.patch.object(nvmeof.NVMeOFConnector, 'create_raid') @mock.patch.object(nvmeof.NVMeOFConnector, '_is_device_in_raid') def test_handle_replicated_volume_new( self, mock_device_raid, mock_create_raid): conn_props = nvmeof.NVMeOFConnProps(connection_properties) mock_device_raid.return_value = False res = self.connector._handle_replicated_volume( ['/dev/nvme1n1', '/dev/nvme1n2', '/dev/nvme1n3'], conn_props) self.assertEqual('/dev/md/fakealias', res) mock_device_raid.assert_any_call('/dev/nvme1n1') mock_device_raid.assert_any_call('/dev/nvme1n2') mock_device_raid.assert_any_call('/dev/nvme1n3') mock_create_raid.assert_called_with( ['/dev/nvme1n1', '/dev/nvme1n2', '/dev/nvme1n3'], '1', 'fakealias', 'fakealias', False) @mock.patch.object(nvmeof.NVMeOFConnector, 'ks_readlink') @mock.patch.object(nvmeof.NVMeOFConnector, 'get_md_name') def test_stop_and_assemble_raid_existing_simple( self, mock_md_name, mock_readlink): mock_readlink.return_value = '' mock_md_name.return_value = 'mdalias' 
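# Note: ks_readlink returning '' simulates a missing /dev/md/mdalias symlink while the member drive already reports md name 'mdalias'; the call is expected to complete without raising.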
self.assertIsNone(self.connector.stop_and_assemble_raid( ['/dev/sda'], '/dev/md/mdalias', False)) mock_md_name.assert_called_with('sda') mock_readlink.assert_called_with('/dev/md/mdalias') @mock.patch.object(nvmeof.NVMeOFConnector, 'ks_readlink') @mock.patch.object(nvmeof.NVMeOFConnector, 'get_md_name') def test_stop_and_assemble_raid( self, mock_md_name, mock_readlink): mock_readlink.return_value = '/dev/md/mdalias' mock_md_name.return_value = 'mdalias' self.assertIsNone(self.connector.stop_and_assemble_raid( ['/dev/sda'], '/dev/md/mdalias', False)) mock_md_name.assert_called_with('sda') mock_readlink.assert_called_with('/dev/md/mdalias') @mock.patch.object(nvmeof.NVMeOFConnector, 'assemble_raid') @mock.patch.object(nvmeof.NVMeOFConnector, 'ks_readlink') @mock.patch.object(nvmeof.NVMeOFConnector, 'get_md_name') def test_stop_and_assemble_raid_err(self, mock_md_name, mock_readlink, mock_assemble): mock_readlink.return_value = '/dev/md/mdalias' mock_md_name.return_value = 'dummy' mock_assemble.side_effect = Exception() self.assertIsNone(self.connector.stop_and_assemble_raid( ['/dev/sda'], '/dev/md/mdalias', False)) mock_md_name.assert_called_with('sda') mock_readlink.assert_called_with('/dev/md/mdalias') mock_assemble.assert_called_with( ['/dev/sda'], '/dev/md/mdalias', False) @mock.patch.object(nvmeof.NVMeOFConnector, 'run_mdadm') def test_assemble_raid_simple(self, mock_run_mdadm): self.assertEqual(self.connector.assemble_raid( ['/dev/sda'], '/dev/md/md1', True), True) mock_run_mdadm.assert_called_with( ['mdadm', '--assemble', '--run', '/dev/md/md1', '-o', '/dev/sda'], True) @mock.patch.object(nvmeof.NVMeOFConnector, 'run_mdadm') def test_assemble_raid_simple_err(self, mock_run_mdadm): mock_run_mdadm.side_effect = putils.ProcessExecutionError() self.assertRaises(putils.ProcessExecutionError, self.connector.assemble_raid, ['/dev/sda'], '/dev/md/md1', True) mock_run_mdadm.assert_called_with( ['mdadm', '--assemble', '--run', '/dev/md/md1', '-o', '/dev/sda'], True) @mock.patch.object(os.path, 'exists') @mock.patch.object(nvmeof.NVMeOFConnector, 'run_mdadm') def test_create_raid_cmd_simple(self, mock_run_mdadm, mock_os): mock_os.return_value = True self.assertIsNone(self.connector.create_raid( ['/dev/sda'], '1', 'md1', 'name', True)) mock_run_mdadm.assert_called_with( ['mdadm', '-C', '-o', 'md1', '-R', '-N', 'name', '--level', '1', '--raid-devices=1', '--bitmap=internal', '--homehost=any', '--failfast', '--assume-clean', '/dev/sda']) mock_os.assert_called_with('/dev/md/name') @mock.patch.object(nvmeof.NVMeOFConnector, 'stop_raid') @mock.patch.object(nvmeof.NVMeOFConnector, 'is_raid_exists') def test_end_raid_simple(self, mock_raid_exists, mock_stop_raid): mock_raid_exists.return_value = True mock_stop_raid.return_value = False self.assertIsNone(self.connector.end_raid('/dev/md/md1')) mock_raid_exists.assert_called_with('/dev/md/md1') mock_stop_raid.assert_called_with('/dev/md/md1', True) @mock.patch.object(os.path, 'exists') @mock.patch.object(nvmeof.NVMeOFConnector, 'stop_raid') @mock.patch.object(nvmeof.NVMeOFConnector, 'is_raid_exists') def test_end_raid(self, mock_raid_exists, mock_stop_raid, mock_os): mock_raid_exists.return_value = True mock_stop_raid.return_value = False mock_os.return_value = True self.assertIsNone(self.connector.end_raid('/dev/md/md1')) mock_raid_exists.assert_called_with('/dev/md/md1') mock_stop_raid.assert_called_with('/dev/md/md1', True) mock_os.assert_called_with('/dev/md/md1') @mock.patch.object(os.path, 'exists') @mock.patch.object(nvmeof.NVMeOFConnector, 
'stop_raid') @mock.patch.object(nvmeof.NVMeOFConnector, 'is_raid_exists') def test_end_raid_err(self, mock_raid_exists, mock_stop_raid, mock_os): mock_raid_exists.return_value = True mock_stop_raid.side_effect = Exception() mock_os.return_value = True self.assertIsNone(self.connector.end_raid('/dev/md/md1')) mock_raid_exists.assert_called_with('/dev/md/md1') mock_stop_raid.assert_called_with('/dev/md/md1', True) mock_os.assert_called_with('/dev/md/md1') @mock.patch.object(nvmeof.NVMeOFConnector, 'run_mdadm') def test_stop_raid_simple(self, mock_run_mdadm): mock_run_mdadm.return_value = 'mdadm output' self.assertEqual(self.connector.stop_raid('/dev/md/md1', True), 'mdadm output') mock_run_mdadm.assert_called_with(['mdadm', '--stop', '/dev/md/md1'], True) @mock.patch.object(nvmeof.NVMeOFConnector, 'run_mdadm') def test_remove_raid_simple(self, mock_run_mdadm): self.assertIsNone(self.connector.remove_raid('/dev/md/md1')) mock_run_mdadm.assert_called_with(['mdadm', '--remove', '/dev/md/md1']) @mock.patch.object(nvmeof.NVMeOFConnector, 'run_nvme_cli') def test_rescan(self, mock_run_nvme_cli): """Test successful nvme rescan.""" mock_run_nvme_cli.return_value = None result = self.connector.rescan('nvme1') self.assertIsNone(result) nvme_command = ('ns-rescan', NVME_DEVICE_PATH) mock_run_nvme_cli.assert_called_with(nvme_command) @mock.patch.object(nvmeof.NVMeOFConnector, 'run_nvme_cli') def test_rescan_err(self, mock_run_nvme_cli): """Test failure on nvme rescan subprocess execution.""" mock_run_nvme_cli.side_effect = Exception() self.assertRaises(exception.CommandExecutionFailed, self.connector.rescan, 'nvme1') nvme_command = ('ns-rescan', NVME_DEVICE_PATH) mock_run_nvme_cli.assert_called_with(nvme_command) @mock.patch.object(executor.Executor, '_execute') def test_is_raid_exists_not(self, mock_execute): mock_execute.return_value = (VOL_UUID + "\n", "") result = self.connector.is_raid_exists(NVME_DEVICE_PATH) self.assertEqual(False, result) cmd = ['mdadm', '--detail', NVME_DEVICE_PATH] args, kwargs = mock_execute.call_args self.assertEqual(args[0], cmd[0]) self.assertEqual(args[1], cmd[1]) self.assertEqual(args[2], cmd[2]) @mock.patch.object(executor.Executor, '_execute') def test_is_raid_exists(self, mock_execute): mock_execute.return_value = (NVME_DEVICE_PATH + ':' + "\n", "") result = self.connector.is_raid_exists(NVME_DEVICE_PATH) self.assertEqual(True, result) cmd = ['mdadm', '--detail', NVME_DEVICE_PATH] args, kwargs = mock_execute.call_args self.assertEqual(args[0], cmd[0]) self.assertEqual(args[1], cmd[1]) self.assertEqual(args[2], cmd[2]) @mock.patch.object(executor.Executor, '_execute') def test_is_raid_exists_err(self, mock_execute): mock_execute.side_effect = putils.ProcessExecutionError result = self.connector.is_raid_exists(NVME_DEVICE_PATH) self.assertEqual(False, result) cmd = ['mdadm', '--detail', NVME_DEVICE_PATH] args, kwargs = mock_execute.call_args self.assertEqual(args[0], cmd[0]) self.assertEqual(args[1], cmd[1]) self.assertEqual(args[2], cmd[2]) def test_get_md_name(self): mock_open = mock.mock_open(read_data=md_stat_contents) with mock.patch('builtins.open', mock_open): result = self.connector.get_md_name(os.path.basename(NVME_NS_PATH)) self.assertEqual('md0', result) mock_open.assert_called_once_with('/proc/mdstat', 'r') mock_fd = mock_open.return_value.__enter__.return_value mock_fd.__iter__.assert_called_once_with() @mock.patch.object(builtins, 'open', side_effect=Exception) def test_get_md_name_err(self, mock_open): result = 
self.connector.get_md_name(os.path.basename(NVME_NS_PATH)) self.assertIsNone(result) mock_open.assert_called_once_with('/proc/mdstat', 'r') @mock.patch.object(executor.Executor, '_execute') def test_is_device_in_raid(self, mock_execute): mock_execute.return_value = (NVME_DEVICE_PATH + ':' + "\n", "") result = self.connector._is_device_in_raid(NVME_DEVICE_PATH) self.assertEqual(True, result) cmd = ['mdadm', '--examine', NVME_DEVICE_PATH] args, kwargs = mock_execute.call_args self.assertEqual(args[0], cmd[0]) self.assertEqual(args[1], cmd[1]) self.assertEqual(args[2], cmd[2]) @mock.patch.object(executor.Executor, '_execute') def test_is_device_in_raid_not_found(self, mock_execute): mock_execute.return_value = (VOL_UUID + "\n", "") result = self.connector._is_device_in_raid(NVME_DEVICE_PATH) self.assertEqual(False, result) cmd = ['mdadm', '--examine', NVME_DEVICE_PATH] args, kwargs = mock_execute.call_args self.assertEqual(args[0], cmd[0]) self.assertEqual(args[1], cmd[1]) self.assertEqual(args[2], cmd[2]) @mock.patch.object(executor.Executor, '_execute') def test_is_device_in_raid_err(self, mock_execute): mock_execute.side_effect = putils.ProcessExecutionError() result = self.connector._is_device_in_raid(NVME_DEVICE_PATH) self.assertEqual(False, result) cmd = ['mdadm', '--examine', NVME_DEVICE_PATH] args, kwargs = mock_execute.call_args self.assertEqual(args[0], cmd[0]) self.assertEqual(args[1], cmd[1]) self.assertEqual(args[2], cmd[2]) @mock.patch.object(executor.Executor, '_execute') def test_run_mdadm(self, mock_execute): mock_execute.return_value = (VOL_UUID + "\n", "") cmd = ['mdadm', '--examine', NVME_DEVICE_PATH] result = self.connector.run_mdadm(cmd) self.assertEqual(VOL_UUID, result) args, kwargs = mock_execute.call_args self.assertEqual(args[0], cmd[0]) self.assertEqual(args[1], cmd[1]) self.assertEqual(args[2], cmd[2]) @mock.patch.object(executor.Executor, '_execute') def test_run_mdadm_err(self, mock_execute): mock_execute.side_effect = putils.ProcessExecutionError() cmd = ['mdadm', '--examine', NVME_DEVICE_PATH] result = self.connector.run_mdadm(cmd) self.assertIsNone(result) args, kwargs = mock_execute.call_args self.assertEqual(args[0], cmd[0]) self.assertEqual(args[1], cmd[1]) self.assertEqual(args[2], cmd[2]) @mock.patch.object(builtins, 'open') def test_get_host_nqn_file_available(self, mock_open): mock_open.return_value.__enter__.return_value.read = ( lambda: HOST_NQN + "\n") host_nqn = utils.get_host_nqn() mock_open.assert_called_once_with('/etc/nvme/hostnqn', 'r') self.assertEqual(HOST_NQN, host_nqn) @mock.patch.object(utils.priv_nvme, 'create_hostnqn') @mock.patch.object(builtins, 'open') def test_get_host_nqn_io_err(self, mock_open, mock_create): mock_create.return_value = mock.sentinel.nqn mock_open.side_effect = IOError() result = utils.get_host_nqn() mock_open.assert_called_once_with('/etc/nvme/hostnqn', 'r') mock_create.assert_called_once_with(None) self.assertEqual(mock.sentinel.nqn, result) @mock.patch.object(utils.priv_nvme, 'create_hostnqn') @mock.patch.object(builtins, 'open') def test_get_host_nqn_io_err_sys_uuid(self, mock_open, mock_create): mock_create.return_value = mock.sentinel.nqn mock_open.side_effect = IOError() result = utils.get_host_nqn(mock.sentinel.system_uuid) mock_open.assert_called_once_with('/etc/nvme/hostnqn', 'r') mock_create.assert_called_once_with(mock.sentinel.system_uuid) self.assertEqual(mock.sentinel.nqn, result) @mock.patch.object(utils.priv_nvme, 'create_hostnqn') @mock.patch.object(builtins, 'open') def test_get_host_nqn_err(self, 
mock_open, mock_create): mock_open.side_effect = Exception() result = utils.get_host_nqn() mock_open.assert_called_once_with('/etc/nvme/hostnqn', 'r') mock_create.assert_not_called() self.assertIsNone(result) @mock.patch.object(executor.Executor, '_execute') def test_run_nvme_cli(self, mock_execute): mock_execute.return_value = ("\n", "") cmd = 'dummy command' result = self.connector.run_nvme_cli(cmd) self.assertEqual(("\n", ""), result) def test_ks_readlink(self): dest = 'dummy path' result = self.connector.ks_readlink(dest) self.assertEqual('', result) @mock.patch.object(executor.Executor, '_execute') def test__get_fs_type(self, mock_execute): mock_execute.return_value = ('expected\n', '') result = self.connector._get_fs_type(NVME_DEVICE_PATH) self.assertEqual('expected', result) mock_execute.assert_called_once_with( 'blkid', NVME_DEVICE_PATH, '-s', 'TYPE', '-o', 'value', run_as_root=True, root_helper=self.connector._root_helper, check_exit_code=False) @mock.patch.object(executor.Executor, '_execute', return_value=('', 'There was a big error')) def test__get_fs_type_err(self, mock_execute): result = self.connector._get_fs_type(NVME_DEVICE_PATH) self.assertIsNone(result) mock_execute.assert_called_once_with( 'blkid', NVME_DEVICE_PATH, '-s', 'TYPE', '-o', 'value', run_as_root=True, root_helper=self.connector._root_helper, check_exit_code=False) @mock.patch.object(nvmeof.NVMeOFConnector, '_get_fs_type') def test__is_raid_device(self, mock_get_fs_type): mock_get_fs_type.return_value = 'linux_raid_member' result = self.connector._is_raid_device(NVME_DEVICE_PATH) self.assertTrue(result) mock_get_fs_type.assert_called_once_with(NVME_DEVICE_PATH) @mock.patch.object(nvmeof.NVMeOFConnector, '_get_fs_type') def test__is_raid_device_not(self, mock_get_fs_type): mock_get_fs_type.return_value = 'xfs' result = self.connector._is_raid_device(NVME_DEVICE_PATH) self.assertFalse(result) mock_get_fs_type.assert_called_once_with(NVME_DEVICE_PATH) @ddt.data(True, False) @mock.patch.object(nvmeof.NVMeOFConnector, 'native_multipath_supported', None) @mock.patch.object(nvmeof.NVMeOFConnector, '_is_native_multipath_supported') def test__set_native_multipath_supported(self, value, mock_ana): mock_ana.return_value = value res = self.connector._set_native_multipath_supported() mock_ana.assert_called_once_with() self.assertIs(value, res) @mock.patch.object(nvmeof.NVMeOFConnector, 'native_multipath_supported', True) @mock.patch.object(nvmeof.NVMeOFConnector, '_is_native_multipath_supported') def test__set_native_multipath_supported_second_call(self, mock_ana): mock_ana.return_value = False res = self.connector._set_native_multipath_supported() mock_ana.assert_not_called() self.assertTrue(res) @mock.patch.object(nvmeof.NVMeOFConnector, '_handle_single_replica') @mock.patch.object(nvmeof.NVMeOFConnector, '_handle_replicated_volume') @mock.patch.object(nvmeof.NVMeOFConnector, '_connect_target') def test__connect_volume_replicated( self, mock_connect, mock_replicated, mock_single): """Connect to replicated backend handles connection failures.""" found_devices = ['/dev/nvme0n1', '/dev/nvme1n1'] mock_connect.side_effect = [Exception] + found_devices res = self.connector._connect_volume_replicated(CONN_PROPS) self.assertEqual(mock_replicated.return_value, res) mock_replicated.assert_called_once_with(found_devices, CONN_PROPS) mock_single.assert_not_called() @mock.patch.object(nvmeof.NVMeOFConnector, '_handle_single_replica') @mock.patch.object(nvmeof.NVMeOFConnector, '_handle_replicated_volume') 
@mock.patch.object(nvmeof.NVMeOFConnector, '_connect_target') def test__connect_volume_replicated_single_replica( self, mock_connect, mock_replicated, mock_single): """Connect to single replica backend.""" conn_props = nvmeof.NVMeOFConnProps({ 'alias': 'fakealias', 'vol_uuid': VOL_UUID, 'volume_replicas': [volume_replicas[0]], 'replica_count': 1 }) found_devices = ['/dev/nvme0n1'] mock_connect.side_effect = found_devices res = self.connector._connect_volume_replicated(conn_props) self.assertEqual(mock_single.return_value, res) mock_replicated.assert_not_called() mock_single.assert_called_once_with(found_devices, 'fakealias') @mock.patch.object(nvmeof.NVMeOFConnector, '_handle_single_replica') @mock.patch.object(nvmeof.NVMeOFConnector, '_handle_replicated_volume') @mock.patch.object(nvmeof.NVMeOFConnector, '_connect_target') def test__connect_volume_replicated_no_device_paths_found( self, mock_connect, mock_replicated, mock_single): """Fail if we cannot connect to any replica.""" mock_connect.side_effect = 3 * [Exception] self.assertRaises(exception.VolumeDeviceNotFound, self.connector._connect_volume_replicated, CONN_PROPS) mock_replicated.assert_not_called() mock_single.assert_not_called() @ddt.data({'result': False, 'use_multipath': False, 'ana_support': True}, {'result': False, 'use_multipath': False, 'ana_support': False}, {'result': False, 'use_multipath': True, 'ana_support': False}, {'result': True, 'use_multipath': True, 'ana_support': True}) @ddt.unpack def test__do_multipath(self, result, use_multipath, ana_support): self.connector.use_multipath = use_multipath self.connector.native_multipath_supported = ana_support self.assertIs(result, self.connector._do_multipath()) @mock.patch.object(nvmeof.NVMeOFConnector, '_try_disconnect') @mock.patch.object(nvmeof.Target, 'set_portals_controllers') def test__try_disconnect_all(self, mock_set_portals, mock_disconnect): """Disconnect all portals for all targets in connection properties.""" connection_properties = { 'vol_uuid': VOL_UUID, 'alias': 'raid_alias', 'replica_count': 2, 'volume_replicas': [ {'target_nqn': 'nqn1', 'vol_uuid': VOL_UUID1, 'portals': [['portal1', 'port_value', 'RoCEv2'], ['portal2', 'port_value', 'anything']]}, {'target_nqn': 'nqn2', 'vol_uuid': VOL_UUID2, 'portals': [['portal4', 'port_value', 'anything'], ['portal3', 'port_value', 'RoCEv2']]} ], } conn_props = nvmeof.NVMeOFConnProps(connection_properties) exc = exception.ExceptionChainer() self.connector._try_disconnect_all(conn_props, exc) self.assertEqual(2, mock_set_portals.call_count) mock_set_portals.assert_has_calls((mock.call(), mock.call())) self.assertEqual(4, mock_disconnect.call_count) mock_disconnect.assert_has_calls(( mock.call(conn_props.targets[0].portals[0]), mock.call(conn_props.targets[0].portals[1]), mock.call(conn_props.targets[1].portals[0]), mock.call(conn_props.targets[1].portals[1]) )) self.assertFalse(bool(exc)) @mock.patch.object(nvmeof.NVMeOFConnector, '_try_disconnect') @mock.patch.object(nvmeof.Target, 'set_portals_controllers') def test__try_disconnect_all_with_failures( self, mock_set_portals, mock_disconnect): """Even with failures it should try to disconnect all portals.""" exc = exception.ExceptionChainer() mock_disconnect.side_effect = [Exception, None] self.connector._try_disconnect_all(self.conn_props, exc) mock_set_portals.assert_called_once_with() self.assertEqual(3, mock_disconnect.call_count) mock_disconnect.assert_has_calls(( mock.call(self.conn_props.targets[0].portals[0]), mock.call(self.conn_props.targets[0].portals[1]),
mock.call(self.conn_props.targets[0].portals[2]) )) self.assertTrue(bool(exc)) @mock.patch.object(nvmeof.NVMeOFConnector, '_execute') @mock.patch.object(nvmeof.Portal, 'can_disconnect') def test__try_disconnect(self, mock_can_disconnect, mock_execute): """We try to disconnect when we can without breaking other devices.""" mock_can_disconnect.return_value = True portal = self.conn_props.targets[0].portals[0] portal.controller = 'nvme0' self.connector._try_disconnect(portal) mock_can_disconnect.assert_called_once_with() mock_execute.assert_called_once_with( 'nvme', 'disconnect', '-d', '/dev/nvme0', root_helper=self.connector._root_helper, run_as_root=True) @mock.patch.object(nvmeof.NVMeOFConnector, '_execute') @mock.patch.object(nvmeof.Portal, 'can_disconnect') def test__try_disconnect_failure(self, mock_can_disconnect, mock_execute): """Confirm disconnect doesn't swallow exceptions.""" mock_can_disconnect.return_value = True portal = self.conn_props.targets[0].portals[0] portal.controller = 'nvme0' mock_execute.side_effect = ValueError self.assertRaises(ValueError, self.connector._try_disconnect, portal) mock_can_disconnect.assert_called_once_with() mock_execute.assert_called_once_with( 'nvme', 'disconnect', '-d', '/dev/nvme0', root_helper=self.connector._root_helper, run_as_root=True) @mock.patch.object(nvmeof.NVMeOFConnector, '_execute') @mock.patch.object(nvmeof.Portal, 'can_disconnect') def test__try_disconnect_no_disconnect( self, mock_can_disconnect, mock_execute): """Doesn't disconnect when it would break other devices.""" mock_can_disconnect.return_value = False portal = self.conn_props.targets[0].portals[0] self.connector._try_disconnect(portal) mock_can_disconnect.assert_called_once_with() mock_execute.assert_not_called() @ddt.data(False, True) def test_supports_multipath(self, ana_support): self.connector.native_multipath_supported = ana_support self.assertEqual(ana_support, self.connector.supports_multipath()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/initiator/connectors/test_rbd.py0000664000175000017500000006102000000000000024710 0ustar00zuulzuul00000000000000# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
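# The cases below exercise rbd.RBDConnector along its two attach paths: the default in-process librados/RBD attach, which returns a linuxrbd.RBDVolumeIOWrapper as the device path, and do_local_attach=True, which shells out to 'rbd map' and reports a /dev/rbd/<pool>/<image> block device.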
from unittest import mock import ddt from os_brick import exception from os_brick.initiator.connectors import rbd from os_brick.initiator import linuxrbd from os_brick.privileged import rootwrap as priv_rootwrap from os_brick.tests.initiator.connectors import test_base_rbd from os_brick.tests.initiator import test_connector from os_brick import utils @ddt.ddt class RBDConnectorTestCase(test_base_rbd.RBDConnectorTestMixin, test_connector.ConnectorTestCase): def test_get_search_path(self): rbd_connector = rbd.RBDConnector(None) path = rbd_connector.get_search_path() self.assertIsNone(path) @mock.patch('os_brick.initiator.linuxrbd.rbd') @mock.patch('os_brick.initiator.linuxrbd.rados') def test_get_volume_paths(self, mock_rados, mock_rbd): rbd_connector = rbd.RBDConnector(None) expected = [] actual = rbd_connector.get_volume_paths(self.connection_properties) self.assertEqual(expected, actual) def test_get_connector_properties(self): props = rbd.RBDConnector.get_connector_properties( 'sudo', multipath=True, enforce_multipath=True) expected_props = {'do_local_attach': False} self.assertEqual(expected_props, props) @mock.patch('os_brick.initiator.linuxrbd.rbd') @mock.patch('os_brick.initiator.linuxrbd.rados') @mock.patch.object(rbd.RBDConnector, '_create_ceph_conf') @mock.patch('os.path.exists') def test_connect_volume(self, mock_path, mock_conf, mock_rados, mock_rbd): """Test the connect volume case.""" rbd_connector = rbd.RBDConnector(None) mock_path.return_value = False mock_conf.return_value = "/tmp/fake_dir/fake_ceph.conf" device_info = rbd_connector.connect_volume(self.connection_properties) # Ensure rados is instantiated correctly mock_rados.Rados.assert_called_once_with( clustername=self.clustername, rados_id=utils.convert_str(self.user), conffile='/tmp/fake_dir/fake_ceph.conf') # Ensure correct calls to connect to cluster self.assertEqual(1, mock_rados.Rados.return_value.connect.call_count) mock_rados.Rados.return_value.open_ioctx.assert_called_once_with( utils.convert_str(self.pool)) # Ensure rbd image is instantiated correctly mock_rbd.Image.assert_called_once_with( mock_rados.Rados.return_value.open_ioctx.return_value, utils.convert_str(self.volume), read_only=False, snapshot=None) # Ensure expected object is returned correctly self.assertIsInstance(device_info['path'], linuxrbd.RBDVolumeIOWrapper) @mock.patch('os_brick.initiator.linuxrbd.rbd') @mock.patch('os_brick.initiator.linuxrbd.rados') @mock.patch.object(rbd.RBDConnector, '_create_ceph_conf') @mock.patch('os.path.exists') def test_provided_keyring(self, mock_path, mock_conf, mock_rados, mock_rbd): conn = rbd.RBDConnector(None) mock_path.return_value = False mock_conf.return_value = "/tmp/fake_dir/fake_ceph.conf" self.connection_properties['keyring'] = self.keyring conn.connect_volume(self.connection_properties) mock_conf.assert_called_once_with(self.hosts, self.ports, self.clustername, self.user, self.keyring) def test_keyring_is_none(self): conn = rbd.RBDConnector(None) keyring = None keyring_data = "[client.cinder]\n key = test\n" mockopen = mock.mock_open(read_data=keyring_data) mockopen.return_value.__exit__ = mock.Mock() with mock.patch('os_brick.initiator.connectors.rbd.open', mockopen, create=True): self.assertEqual( conn._check_or_get_keyring_contents(keyring, 'cluster', 'user'), keyring_data) self.assertEqual( conn._check_or_get_keyring_contents(keyring, 'cluster', None), '') def test_keyring_raise_error(self): conn = rbd.RBDConnector(None) keyring = None mockopen = mock.mock_open() mockopen.return_value = "" with 
mock.patch('os_brick.initiator.connectors.rbd.open', mockopen, create=True) as mock_keyring_file: mock_keyring_file.side_effect = IOError self.assertRaises(exception.BrickException, conn._check_or_get_keyring_contents, keyring, 'cluster', 'user') @mock.patch('os_brick.initiator.connectors.rbd.tempfile.mkstemp') def test_create_ceph_conf(self, mock_mkstemp): mockopen = mock.mock_open() fd = mock.sentinel.fd tmpfile = mock.sentinel.tmpfile mock_mkstemp.return_value = (fd, tmpfile) with mock.patch('os.fdopen', mockopen, create=True): rbd_connector = rbd.RBDConnector(None) conf_path = rbd_connector._create_ceph_conf( self.hosts, self.ports, self.clustername, self.user, self.keyring) self.assertEqual(conf_path, tmpfile) mock_mkstemp.assert_called_once_with(prefix='brickrbd_') # Bug #1865754 - make sure generated config file has a '[global]' # section _, args, _ = mockopen().writelines.mock_calls[0] self.assertIn('[global]', args[0]) @mock.patch('os_brick.privileged.rbd.root_create_ceph_conf') def test_create_non_openstack_config(self, mock_priv_create): res = rbd.RBDConnector.create_non_openstack_config( self.connection_properties) mock_priv_create.assert_called_once_with(self.hosts, self.ports, self.clustername, self.user, self.keyring) self.assertIs(mock_priv_create.return_value, res) @mock.patch('os_brick.privileged.rbd.root_create_ceph_conf') def test_create_non_openstack_config_in_openstack(self, mock_priv_create): connection_properties = self.connection_properties.copy() del connection_properties['keyring'] res = rbd.RBDConnector.create_non_openstack_config( connection_properties) mock_priv_create.assert_not_called() self.assertIsNone(res) @mock.patch.object(priv_rootwrap, 'execute', return_value=None) def test_connect_local_volume(self, mock_execute): rbd_connector = rbd.RBDConnector(None, do_local_attach=True) conn = {'name': 'pool/image', 'auth_username': 'fake_user', 'hosts': ['192.168.10.2'], 'ports': ['6789']} device_info = rbd_connector.connect_volume(conn) execute_call1 = mock.call('which', 'rbd') cmd = ['rbd', 'map', 'image', '--pool', 'pool', '--id', 'fake_user', '--mon_host', '192.168.10.2:6789'] execute_call2 = mock.call(*cmd, root_helper=None, run_as_root=True) mock_execute.assert_has_calls([execute_call1, execute_call2]) expected_info = {'path': '/dev/rbd/pool/image', 'type': 'block'} self.assertEqual(expected_info, device_info) @mock.patch.object(priv_rootwrap, 'execute', return_value=None) @mock.patch('os.path.exists') @mock.patch('os.path.islink') @mock.patch('os.path.realpath') def test_connect_local_volume_dev_exist(self, mock_realpath, mock_islink, mock_exists, mock_execute): rbd_connector = rbd.RBDConnector(None, do_local_attach=True) conn = {'name': 'pool/image', 'auth_username': 'fake_user', 'hosts': ['192.168.10.2'], 'ports': ['6789']} mock_realpath.return_value = '/dev/rbd0' mock_islink.return_value = True mock_exists.return_value = True device_info = rbd_connector.connect_volume(conn) execute_call1 = mock.call('which', 'rbd') cmd = ['rbd', 'map', 'image', '--pool', 'pool', '--id', 'fake_user', '--mon_host', '192.168.10.2:6789'] execute_call2 = mock.call(*cmd, root_helper=None, run_as_root=True) mock_execute.assert_has_calls([execute_call1]) self.assertNotIn(execute_call2, mock_execute.mock_calls) expected_info = {'path': '/dev/rbd/pool/image', 'type': 'block'} self.assertEqual(expected_info, device_info) @mock.patch.object(priv_rootwrap, 'execute', return_value=None) def test_connect_local_volume_without_mons(self, mock_execute): rbd_connector = 
rbd.RBDConnector(None, do_local_attach=True) conn = {'name': 'pool/image', 'auth_username': 'fake_user'} device_info = rbd_connector.connect_volume(conn) execute_call1 = mock.call('which', 'rbd') cmd = ['rbd', 'map', 'image', '--pool', 'pool', '--id', 'fake_user'] execute_call2 = mock.call(*cmd, root_helper=None, run_as_root=True) mock_execute.assert_has_calls([execute_call1, execute_call2]) expected_info = {'path': '/dev/rbd/pool/image', 'type': 'block'} self.assertEqual(expected_info, device_info) @mock.patch('os_brick.initiator.connectors.rbd.' 'RBDConnector._local_attach_volume') def test_connect_volume_local(self, mock_local_attach): connector = rbd.RBDConnector(None, do_local_attach=True) res = connector.connect_volume(self.connection_properties) mock_local_attach.assert_called_once_with(self.connection_properties) self.assertIs(mock_local_attach.return_value, res) @mock.patch.object(rbd.RBDConnector, '_get_rbd_args') @mock.patch.object(rbd.RBDConnector, 'create_non_openstack_config') @mock.patch.object(rbd.RBDConnector, '_execute') def test__local_attach_volume_non_openstack(self, mock_execute, mock_rbd_cfg, mock_args): mock_args.return_value = [mock.sentinel.rbd_args] connector = rbd.RBDConnector(None, do_local_attach=True) res = connector._local_attach_volume(self.connection_properties) mock_rbd_cfg.assert_called_once_with(self.connection_properties) mock_args.assert_called_once_with(self.connection_properties, mock_rbd_cfg.return_value) self.assertEqual(2, mock_execute.call_count) mock_execute.assert_has_calls([ mock.call('which', 'rbd'), mock.call('rbd', 'map', 'fake_volume', '--pool', 'fake_pool', mock.sentinel.rbd_args, root_helper=connector._root_helper, run_as_root=True) ]) expected = {'path': '/dev/rbd/fake_pool/fake_volume', 'type': 'block', 'conf': mock_rbd_cfg.return_value} self.assertEqual(expected, res) @mock.patch('os_brick.privileged.rbd.delete_if_exists') @mock.patch.object(rbd.RBDConnector, '_get_rbd_args') @mock.patch.object(rbd.RBDConnector, 'create_non_openstack_config') @mock.patch.object(rbd.RBDConnector, '_execute') def test__local_attach_volume_fail_non_openstack(self, mock_execute, mock_rbd_cfg, mock_args, mock_delete): mock_args.return_value = [mock.sentinel.rbd_args] mock_execute.side_effect = [None, ValueError] connector = rbd.RBDConnector(None, do_local_attach=True) self.assertRaises(ValueError, connector._local_attach_volume, self.connection_properties) mock_rbd_cfg.assert_called_once_with(self.connection_properties) mock_args.assert_called_once_with(self.connection_properties, mock_rbd_cfg.return_value) self.assertEqual(2, mock_execute.call_count) mock_execute.assert_has_calls([ mock.call('which', 'rbd'), mock.call('rbd', 'map', 'fake_volume', '--pool', 'fake_pool', mock.sentinel.rbd_args, root_helper=connector._root_helper, run_as_root=True) ]) mock_delete.assert_called_once_with(mock_rbd_cfg.return_value) @mock.patch('os_brick.initiator.linuxrbd.rbd') @mock.patch('os_brick.initiator.linuxrbd.rados') @mock.patch.object(linuxrbd.RBDVolumeIOWrapper, 'close') def test_disconnect_volume(self, volume_close, mock_rados, mock_rbd): """Test the disconnect volume case.""" rbd_connector = rbd.RBDConnector(None) device_info = rbd_connector.connect_volume(self.connection_properties) rbd_connector.disconnect_volume( self.connection_properties, device_info) self.assertEqual(1, volume_close.call_count) @ddt.data( """ [{"id":"0","pool":"pool","device":"/dev/rbd0","name":"image"}, {"id":"1","pool":"pool","device":"/dev/rdb1","name":"image_2"}] """, # new-style output 
(ceph 13.2.0+) """ {"0":{"pool":"pool","device":"/dev/rbd0","name":"image"}, "1":{"pool":"pool","device":"/dev/rdb1","name":"image_2"}} """, # old-style output ) @mock.patch('os_brick.privileged.rbd.delete_if_exists') @mock.patch.object(priv_rootwrap, 'execute', return_value=None) def test_disconnect_local_volume(self, rbd_map_out, mock_execute, mock_delete): """Test the disconnect volume case with local attach.""" rbd_connector = rbd.RBDConnector(None, do_local_attach=True) conn = {'name': 'pool/image', 'auth_username': 'fake_user', 'hosts': ['192.168.10.2'], 'ports': ['6789']} mock_execute.side_effect = [(rbd_map_out, None), (None, None)] show_cmd = ['rbd', 'showmapped', '--format=json', '--id', 'fake_user', '--mon_host', '192.168.10.2:6789'] unmap_cmd = ['rbd', 'unmap', '/dev/rbd0', '--id', 'fake_user', '--mon_host', '192.168.10.2:6789'] rbd_connector.disconnect_volume(conn, None) # Assert that showmapped is used before we unmap the root device mock_execute.assert_has_calls([ mock.call(*show_cmd, root_helper=None, run_as_root=True), mock.call(*unmap_cmd, root_helper=None, run_as_root=True)]) mock_delete.assert_not_called() @mock.patch('os_brick.privileged.rbd.delete_if_exists') @mock.patch.object(rbd.RBDConnector, '_find_root_device') @mock.patch.object(rbd.RBDConnector, '_execute') def test_disconnect_local_volume_non_openstack(self, mock_execute, mock_find, mock_delete): connector = rbd.RBDConnector(None, do_local_attach=True) mock_find.return_value = '/dev/rbd0' connector.disconnect_volume(self.connection_properties, {'conf': mock.sentinel.conf}) mock_find.assert_called_once_with(self.connection_properties, mock.sentinel.conf) mock_execute.assert_called_once_with( 'rbd', 'unmap', '/dev/rbd0', '--id', 'fake_user', '--mon_host', '192.168.10.2:6789', '--conf', mock.sentinel.conf, root_helper=connector._root_helper, run_as_root=True) mock_delete.assert_called_once_with(mock.sentinel.conf) @mock.patch.object(priv_rootwrap, 'execute', return_value=None) def test_disconnect_local_volume_no_mapping(self, mock_execute): rbd_connector = rbd.RBDConnector(None, do_local_attach=True) conn = {'name': 'pool/not_mapped', 'auth_username': 'fake_user', 'hosts': ['192.168.10.2'], 'ports': ['6789']} mock_execute.return_value = (""" {"0":{"pool":"pool","device":"/dev/rbd0","name":"pool-image"}, "1":{"pool":"pool","device":"/dev/rdb1","name":"pool-image_2"}}""", None) show_cmd = ['rbd', 'showmapped', '--format=json', '--id', 'fake_user', '--mon_host', '192.168.10.2:6789'] rbd_connector.disconnect_volume(conn, None) # Assert that only showmapped is called when no mappings are found mock_execute.assert_called_once_with(*show_cmd, root_helper=None, run_as_root=True) @mock.patch.object(priv_rootwrap, 'execute', return_value=None) def test_disconnect_local_volume_no_mappings(self, mock_execute): rbd_connector = rbd.RBDConnector(None, do_local_attach=True) conn = {'name': 'pool/image', 'auth_username': 'fake_user', 'hosts': ['192.168.10.2'], 'ports': ['6789']} mock_execute.return_value = ("{}", None) show_cmd = ['rbd', 'showmapped', '--format=json', '--id', 'fake_user', '--mon_host', '192.168.10.2:6789'] rbd_connector.disconnect_volume(conn, None) # Assert that only showmapped is called when no mappings are found mock_execute.assert_called_once_with(*show_cmd, root_helper=None, run_as_root=True) @mock.patch('oslo_utils.fileutils.delete_if_exists') @mock.patch.object(rbd.RBDConnector, '_get_rbd_handle') def test_extend_volume_handle(self, mock_handle, mock_delete): connector = rbd.RBDConnector(None) res = 
connector.extend_volume(self.connection_properties) mock_handle.assert_called_once_with(self.connection_properties) mock_handle.return_value.seek.assert_called_once_with(0, 2) mock_handle.return_value.tell.assert_called_once_with() self.assertIs(mock_handle().tell(), res) mock_delete.assert_called_once_with(mock_handle().rbd_conf) mock_handle.return_value.close.assert_called_once_with() @mock.patch('oslo_utils.fileutils.delete_if_exists') @mock.patch.object(rbd.RBDConnector, '_get_rbd_handle') def test_extend_volume_handle_fail(self, mock_handle, mock_delete): mock_handle.return_value.seek.side_effect = ValueError connector = rbd.RBDConnector(None) self.assertRaises(ValueError, connector.extend_volume, self.connection_properties) mock_handle.assert_called_once_with(self.connection_properties) mock_handle.return_value.seek.assert_called_once_with(0, 2) mock_handle().tell.assert_not_called() mock_delete.assert_called_once_with(mock_handle.return_value.rbd_conf) mock_handle.return_value.close.assert_called_once_with() @mock.patch.object(rbd, 'open') @mock.patch('os_brick.privileged.rbd.delete_if_exists') @mock.patch.object(rbd.RBDConnector, '_find_root_device') @mock.patch.object(rbd.RBDConnector, 'create_non_openstack_config') def test_extend_volume_block(self, mock_config, mock_find, mock_delete, mock_open): mock_find.return_value = '/dev/rbd1' file_handle = mock_open.return_value.__enter__.return_value file_handle.read.return_value = '123456789' connector = rbd.RBDConnector(None, do_local_attach=True) res = connector.extend_volume(self.connection_properties) mock_config.assert_called_once_with(self.connection_properties) mock_find.assert_called_once_with(self.connection_properties, mock_config.return_value) mock_delete.assert_called_once_with(mock_config.return_value) mock_open.assert_called_once_with('/sys/devices/rbd/1/size') file_handle.read.assert_called_once_with() self.assertEqual(123456789, res) @mock.patch.object(rbd, 'open') @mock.patch('os_brick.privileged.rbd.delete_if_exists') @mock.patch.object(rbd.RBDConnector, '_find_root_device') @mock.patch.object(rbd.RBDConnector, 'create_non_openstack_config') def test_extend_volume_no_device_local(self, mock_config, mock_find, mock_delete, mock_open): mock_find.return_value = None connector = rbd.RBDConnector(None, do_local_attach=True) self.assertRaises(exception.BrickException, connector.extend_volume, self.connection_properties) mock_config.assert_called_once_with(self.connection_properties) mock_find.assert_called_once_with(self.connection_properties, mock_config.return_value) mock_delete.assert_called_once_with(mock_config.return_value) mock_open.assert_not_called() @mock.patch.object(rbd.RBDConnector, '_get_rbd_args') @mock.patch.object(rbd.RBDConnector, '_execute') def test_find_root_device(self, mock_execute, mock_args): mock_args.return_value = [mock.sentinel.rbd_args] mock_execute.return_value = ( '{"0":{"pool":"pool","device":"/dev/rdb0","name":"image"},' '"1":{"pool":"pool","device":"/dev/rbd1","name":"fake_volume"}}', 'stderr') connector = rbd.RBDConnector(None) res = connector._find_root_device(self.connection_properties, mock.sentinel.conf) mock_args.assert_called_once_with(self.connection_properties, mock.sentinel.conf) mock_execute.assert_called_once_with( 'rbd', 'showmapped', '--format=json', mock.sentinel.rbd_args, root_helper=connector._root_helper, run_as_root=True) self.assertEqual('/dev/rbd1', res) @mock.patch.object(rbd.RBDConnector, '_check_valid_device') @mock.patch('os_brick.privileged.rbd.check_valid_path') 
@mock.patch.object(rbd, 'open') def test_check_valid_device_handle_no_path(self, mock_open, check_path, check_device): connector = rbd.RBDConnector(None) res = connector.check_valid_device(None) self.assertFalse(res) mock_open.assert_not_called() check_path.assert_not_called() check_device.assert_not_called() @ddt.data(True, False) @mock.patch.object(rbd.RBDConnector, '_check_valid_device') @mock.patch('os_brick.privileged.rbd.check_valid_path') @mock.patch.object(rbd, 'open') def test_check_valid_device_handle(self, run_as_root, mock_open, check_path, check_device): connector = rbd.RBDConnector(None) res = connector.check_valid_device(mock.sentinel.handle, run_as_root=run_as_root) check_device.assert_called_once_with(mock.sentinel.handle) self.assertIs(check_device.return_value, res) mock_open.assert_not_called() check_path.assert_not_called() @mock.patch.object(rbd.RBDConnector, '_check_valid_device') @mock.patch('os_brick.privileged.rbd.check_valid_path') @mock.patch.object(rbd, 'open') def test_check_valid_device_block_root(self, mock_open, check_path, check_device): connector = rbd.RBDConnector(None) path = '/dev/rbd0' res = connector.check_valid_device(path, run_as_root=True) check_path.assert_called_once_with(path) self.assertEqual(check_path.return_value, res) mock_open.assert_not_called() check_device.assert_not_called() @mock.patch.object(rbd.RBDConnector, '_check_valid_device') @mock.patch('os_brick.privileged.rbd.check_valid_path') @mock.patch.object(rbd, 'open') def test_check_valid_device_block_non_root(self, mock_open, check_path, check_device): connector = rbd.RBDConnector(None) path = '/dev/rbd0' res = connector.check_valid_device(path, run_as_root=False) mock_open.assert_called_once_with(path, 'rb') check_device.assert_called_once_with(mock_open().__enter__()) self.assertIs(check_device.return_value, res) check_path.assert_not_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/initiator/connectors/test_remotefs.py0000664000175000017500000000637300000000000025777 0ustar00zuulzuul00000000000000# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
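# RemoteFsConnector delegates the real work to
# os_brick.remotefs.remotefs.RemoteFsClient, so the tests below only verify
# the delegation surface: get_search_path() returns the mount point base, and
# get_volume_paths() builds "<mount_base>/<mount_dir>/<volume_name>", where
# the mount directory name is assumed (judging by the fixtures below) to be
# derived from a hash of the export string, e.g.
# "/mnt/test/df0808229363aad55c27da50c38d6328/9c592d52-ce47-4263-8c21-4ecf3c029cdb"
# for the export "172.18.194.100:/var/nfs".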
from unittest import mock from os_brick.initiator.connectors import remotefs from os_brick.remotefs import remotefs as remotefs_client from os_brick.tests.initiator import test_connector class RemoteFsConnectorTestCase(test_connector.ConnectorTestCase): """Test cases for Remote FS initiator class.""" TEST_DEV = '172.18.194.100:/var/nfs' TEST_PATH = '/mnt/test/df0808229363aad55c27da50c38d6328' TEST_BASE = '/mnt/test' TEST_NAME = '9c592d52-ce47-4263-8c21-4ecf3c029cdb' def setUp(self): super(RemoteFsConnectorTestCase, self).setUp() self.connection_properties = { 'export': self.TEST_DEV, 'name': self.TEST_NAME} self.connector = remotefs.RemoteFsConnector( 'nfs', root_helper='sudo', nfs_mount_point_base=self.TEST_BASE, nfs_mount_options='vers=3') @mock.patch('os_brick.remotefs.remotefs.ScalityRemoteFsClient') def test_init_with_scality(self, mock_scality_remotefs_client): remotefs.RemoteFsConnector('scality', root_helper='sudo') self.assertEqual(1, mock_scality_remotefs_client.call_count) def test_get_connector_properties(self): props = remotefs.RemoteFsConnector.get_connector_properties( 'sudo', multipath=True, enforce_multipath=True) expected_props = {} self.assertEqual(expected_props, props) def test_get_search_path(self): expected = self.TEST_BASE actual = self.connector.get_search_path() self.assertEqual(expected, actual) @mock.patch.object(remotefs_client.RemoteFsClient, 'mount') def test_get_volume_paths(self, mock_mount): path = ("%(path)s/%(name)s" % {'path': self.TEST_PATH, 'name': self.TEST_NAME}) expected = [path] actual = self.connector.get_volume_paths(self.connection_properties) self.assertEqual(expected, actual) @mock.patch.object(remotefs_client.RemoteFsClient, 'mount') @mock.patch.object(remotefs_client.RemoteFsClient, 'get_mount_point', return_value="something") def test_connect_volume(self, mount_point_mock, mount_mock): """Test the basic connect volume case.""" self.connector.connect_volume(self.connection_properties) def test_disconnect_volume(self): """Nothing should happen here -- make sure it doesn't blow up.""" self.connector.disconnect_volume(self.connection_properties, {}) def test_extend_volume(self): self.assertRaises(NotImplementedError, self.connector.extend_volume, self.connection_properties) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/initiator/connectors/test_scaleio.py0000664000175000017500000004011600000000000025563 0ustar00zuulzuul00000000000000# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
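# The ScaleIO tests below never reach a real REST endpoint: requests.get and
# requests.post are patched with handle_scaleio_request, which strips the
# scheme/host prefix from the URL and looks the remaining API path up in a
# dict of canned MockHTTPSResponse objects, falling back to a 404 response.
# A minimal, self-contained sketch of that dispatch pattern
# (_dispatch_fake_rest is an illustrative name, not part of the test class):

def _dispatch_fake_rest(url, canned_responses, not_found):
    """Map 'https://<host>:<port>/api/<path>' to a canned response object."""
    # 'https://host:443/api/foo' -> '443/api/foo' -> 'api/foo' -> 'foo'
    api_call = url.split(':', 2)[2].split('/', 1)[1].replace('api/', '')
    return canned_responses.get(api_call, not_found)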
import json import os from unittest import mock import requests from os_brick import exception from os_brick.initiator.connectors import scaleio from os_brick.tests.initiator import test_connector class ScaleIOConnectorTestCase(test_connector.ConnectorTestCase): """Test cases for ScaleIO connector.""" # Fake volume information vol = { 'id': 'vol1', 'name': 'test_volume', 'provider_id': 'vol1' } # Fake SDC GUID fake_guid = '013a5304-d053-4b30-a34f-ee3ad983236d' def setUp(self): super(ScaleIOConnectorTestCase, self).setUp() self.fake_connection_properties = { 'hostIP': test_connector.MY_IP, 'serverIP': test_connector.MY_IP, 'scaleIO_volname': self.vol['name'], 'scaleIO_volume_id': self.vol['provider_id'], 'serverPort': 443, 'serverUsername': 'test', 'config_group': 'test', 'failed_over': False, 'iopsLimit': None, 'bandwidthLimit': None } # Formatting string for REST API calls self.action_format = "instances/Volume::{}/action/{{}}".format( self.vol['id']) self.get_volume_api = 'types/Volume/instances/getByName::{}'.format( self.vol['name']) # Map of REST API calls to responses self.mock_calls = { self.get_volume_api: self.MockHTTPSResponse(json.dumps(self.vol['id'])), self.action_format.format('addMappedSdc'): self.MockHTTPSResponse(''), self.action_format.format('setMappedSdcLimits'): self.MockHTTPSResponse(''), self.action_format.format('removeMappedSdc'): self.MockHTTPSResponse(''), } # Default error REST response self.error_404 = self.MockHTTPSResponse(content=dict( errorCode=0, message='HTTP 404', ), status_code=404) # Patch the request and os calls to fake versions self.mock_object(requests, 'get', self.handle_scaleio_request) self.mock_object(requests, 'post', self.handle_scaleio_request) self.mock_object(os.path, 'isdir', return_value=True) self.mock_object(os, 'listdir', return_value=["emc-vol-{}".format(self.vol['id'])]) # Patch scaleio privileged calls self.get_password_mock = self.mock_object(scaleio.priv_scaleio, 'get_connector_password', return_value='fake_password') self.get_guid_mock = self.mock_object(scaleio.priv_scaleio, 'get_guid', return_value=self.fake_guid) self.rescan_vols_mock = self.mock_object(scaleio.priv_scaleio, 'rescan_vols') # The actual ScaleIO connector self.connector = scaleio.ScaleIOConnector( 'sudo', execute=self.fake_execute) class MockHTTPSResponse(requests.Response): """Mock HTTP Response Defines the https replies from the mocked calls to do_request() """ def __init__(self, content, status_code=200): super(ScaleIOConnectorTestCase.MockHTTPSResponse, self).__init__() self._content = content self.encoding = 'UTF-8' self.status_code = status_code def json(self, **kwargs): if isinstance(self._content, str): return super(ScaleIOConnectorTestCase.MockHTTPSResponse, self).json(**kwargs) return self._content @property def text(self): if not isinstance(self._content, str): return json.dumps(self._content) self._content = self._content.encode('utf-8') return super(ScaleIOConnectorTestCase.MockHTTPSResponse, self).text def handle_scaleio_request(self, url, *args, **kwargs): """Fake REST server""" api_call = url.split(':', 2)[2].split('/', 1)[1].replace('api/', '') if 'setMappedSdcLimits' in api_call: self.assertNotIn("iops_limit", kwargs['data']) if "iopsLimit" not in kwargs['data']: self.assertIn("bandwidthLimitInKbps", kwargs['data']) elif "bandwidthLimitInKbps" not in kwargs['data']: self.assertIn("iopsLimit", kwargs['data']) else: self.assertIn("bandwidthLimitInKbps", kwargs['data']) self.assertIn("iopsLimit", kwargs['data']) try: return 
self.mock_calls[api_call] except KeyError: return self.error_404 def test_get_search_path(self): expected = "/dev/disk/by-id" actual = self.connector.get_search_path() self.assertEqual(expected, actual) @mock.patch.object(os.path, 'exists', return_value=True) @mock.patch.object(scaleio.ScaleIOConnector, '_wait_for_volume_path') def test_get_volume_paths(self, mock_wait_for_path, mock_exists): mock_wait_for_path.return_value = "emc-vol-vol1" expected = ['/dev/disk/by-id/emc-vol-vol1'] actual = self.connector.get_volume_paths( self.fake_connection_properties) self.assertEqual(expected, actual) def test_get_connector_properties(self): props = scaleio.ScaleIOConnector.get_connector_properties( 'sudo', multipath=True, enforce_multipath=True) expected_props = {} self.assertEqual(expected_props, props) def test_connect_volume(self): """Successful connect to volume""" self.connector.connect_volume(self.fake_connection_properties) self.get_guid_mock.assert_called_once_with( self.connector.GET_GUID_OP_CODE) self.get_password_mock.assert_called_once() def test_connect_volume_old_connection_properties(self): """Successful connect to volume using old-style connection properties""" connection_properties = { 'hostIP': test_connector.MY_IP, 'serverIP': test_connector.MY_IP, 'scaleIO_volname': self.vol['name'], 'scaleIO_volume_id': self.vol['provider_id'], 'serverPort': 443, 'serverUsername': 'test', 'serverPassword': 'fake', 'serverToken': 'fake_token', 'iopsLimit': None, 'bandwidthLimit': None } self.connector.connect_volume(connection_properties) self.get_guid_mock.assert_called_once_with( self.connector.GET_GUID_OP_CODE) self.get_password_mock.assert_not_called() def test_connect_volume_without_volume_id(self): """Successful connect to volume without a Volume Id""" connection_properties = dict(self.fake_connection_properties) connection_properties.pop('scaleIO_volume_id') self.connector.connect_volume(connection_properties) self.get_guid_mock.assert_called_once_with( self.connector.GET_GUID_OP_CODE) def test_connect_with_bandwidth_limit(self): """Successful connect to volume with bandwidth limit""" self.fake_connection_properties['bandwidthLimit'] = '500' self.test_connect_volume() def test_connect_with_iops_limit(self): """Successful connect to volume with iops limit""" self.fake_connection_properties['iopsLimit'] = '80' self.test_connect_volume() def test_connect_with_iops_and_bandwidth_limits(self): """Successful connect with iops and bandwidth limits""" self.fake_connection_properties['bandwidthLimit'] = '500' self.fake_connection_properties['iopsLimit'] = '80' self.test_connect_volume() def test_disconnect_volume(self): """Successful disconnect from volume""" self.connector.disconnect_volume(self.fake_connection_properties, None) self.get_guid_mock.assert_called_once_with( self.connector.GET_GUID_OP_CODE) def test_disconnect_volume_without_volume_id(self): """Successful disconnect from volume without a Volume Id""" connection_properties = dict(self.fake_connection_properties) connection_properties.pop('scaleIO_volume_id') self.connector.disconnect_volume(connection_properties, None) self.get_guid_mock.assert_called_once_with( self.connector.GET_GUID_OP_CODE) def test_error_id(self): """Fail to connect with bad volume name""" self.fake_connection_properties['scaleIO_volume_id'] = 'bad_id' self.mock_calls[self.get_volume_api] = self.MockHTTPSResponse( dict(errorCode='404', message='Test volume not found'), 404) self.assertRaises(exception.BrickException, self.test_connect_volume) def test_error_no_volume_id(self): """Fail to connect 
with no volume id""" self.fake_connection_properties['scaleIO_volume_id'] = None self.mock_calls[self.get_volume_api] = self.MockHTTPSResponse( 'null', 200) self.assertRaises(exception.BrickException, self.test_connect_volume) def test_error_bad_login(self): """Fail to connect with bad authentication""" self.mock_calls[self.get_volume_api] = self.MockHTTPSResponse( 'null', 401) self.mock_calls['login'] = self.MockHTTPSResponse('null', 401) self.mock_calls[self.action_format.format( 'addMappedSdc')] = self.MockHTTPSResponse( dict(errorCode=401, message='bad login'), 401) self.assertRaises(exception.BrickException, self.test_connect_volume) def test_error_map_volume(self): """Fail to connect with REST API failure""" self.mock_calls[self.action_format.format( 'addMappedSdc')] = self.MockHTTPSResponse( dict(errorCode=self.connector.VOLUME_NOT_MAPPED_ERROR, message='Test error map volume'), 500) self.assertRaises(exception.BrickException, self.test_connect_volume) def test_error_map_volume_v4(self): """Fail to connect with REST API failure (v4)""" self.mock_calls[self.action_format.format( 'addMappedSdc')] = self.MockHTTPSResponse( dict(errorCode=self.connector.VOLUME_NOT_MAPPED_ERROR_v4, message='Test error map volume'), 500) self.assertRaises(exception.BrickException, self.test_connect_volume) @mock.patch('os_brick.utils._time_sleep') def test_error_path_not_found(self, sleep_mock): """Timeout waiting for volume to map to local file system""" self.mock_object(os, 'listdir', return_value=["emc-vol-no-volume"]) self.assertRaises(exception.BrickException, self.test_connect_volume) self.assertTrue(sleep_mock.called) def test_map_volume_already_mapped(self): """Ignore REST API failure for volume already mapped""" self.mock_calls[self.action_format.format( 'addMappedSdc')] = self.MockHTTPSResponse( dict(errorCode=self.connector.VOLUME_ALREADY_MAPPED_ERROR, message='Test error map volume'), 500) self.test_connect_volume() def test_map_volume_already_mapped_v4(self): """Ignore REST API failure for volume already mapped""" self.mock_calls[self.action_format.format( 'addMappedSdc')] = self.MockHTTPSResponse( dict(errorCode=self.connector.VOLUME_ALREADY_MAPPED_ERROR_v4, message='Test error map volume'), 500) self.test_connect_volume() def test_error_disconnect_volume(self): """Fail to disconnect with REST API failure""" self.mock_calls[self.action_format.format( 'removeMappedSdc')] = self.MockHTTPSResponse( dict(errorCode=self.connector.VOLUME_ALREADY_MAPPED_ERROR, message='Test error map volume'), 500) self.assertRaises(exception.BrickException, self.test_disconnect_volume) def test_disconnect_volume_not_mapped(self): """Ignore REST API failure for volume not mapped""" self.mock_calls[self.action_format.format( 'removeMappedSdc')] = self.MockHTTPSResponse( dict(errorCode=self.connector.VOLUME_NOT_MAPPED_ERROR, message='Test error map volume'), 500) self.test_disconnect_volume() def test_disconnect_volume_not_mapped_v4(self): """Ignore REST API failure for volume not mapped (v4)""" self.mock_calls[self.action_format.format( 'removeMappedSdc')] = self.MockHTTPSResponse( dict(errorCode=self.connector.VOLUME_NOT_MAPPED_ERROR_v4, message='Test error map volume'), 500) self.test_disconnect_volume() @mock.patch.object(os.path, 'exists', return_value=True) @mock.patch.object(scaleio.ScaleIOConnector, '_find_volume_path') @mock.patch('os_brick.utils.get_device_size') def test_extend_volume(self, mock_device_size, mock_find_volume_path, mock_exists): mock_device_size.return_value = 16 
mock_find_volume_path.return_value = "emc-vol-vol1" extended_size = self.connector.extend_volume( self.fake_connection_properties) self.assertEqual(extended_size, mock_device_size.return_value) self.rescan_vols_mock.assert_called_once_with( self.connector.RESCAN_VOLS_OP_CODE) def test_connection_properties_without_failed_over(self): """Handle connection properties with 'failed_over' missing""" connection_properties = dict(self.fake_connection_properties) connection_properties.pop('failed_over') self.connector.connect_volume(connection_properties) self.get_password_mock.assert_called_once_with( scaleio.CONNECTOR_CONF_PATH, connection_properties['config_group'], False) @mock.patch('tenacity.wait_exponential') @mock.patch.object(os.path, 'exists', return_value=True) def test_disconnect_volume_wait_for_path_not_removed(self, path_mock, wait_mock): self.fake_connection_properties['device_path'] = ('/dev/' 'disk/by-id/' 'emc-vol-' '00df72815d3b900f' '-d4f7289200000023') self.assertRaises(exception.BrickException, self.test_disconnect_volume) wait_mock.assert_called_once_with(multiplier=1, min=0, exp_base=1) @mock.patch('tenacity.wait_exponential') @mock.patch.object(os.path, 'exists', return_value=False) def test_disconnect_volume_wait_for_path_removed(self, path_mock, wait_mock): self.fake_connection_properties['device_path'] = ('/dev/' 'disk/by-id/' 'emc-vol-' '00df72815d3b900f' '-d4f7289200000023') self.test_disconnect_volume() wait_mock.assert_called_once_with(multiplier=1, min=0, exp_base=1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/initiator/connectors/test_storpool.py0000664000175000017500000003462400000000000026034 0ustar00zuulzuul00000000000000# Copyright (c) 2015 - 2017 StorPool # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
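# The StorPool tests below drive the connector against a fully mocked
# StorPool API.  faulty_api (defined after the imports) simulates an API call
# that keeps failing with a "busy" StorPoolAPIError a configurable number of
# times; test_sp_api_exceptions uses it to pin down the assumed retry
# contract: volume reassignment is retried while the API reports busy, the
# final attempt is issued with 'force': True, and BrickException is raised
# only when even the forced attempt fails.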
import copy from unittest import mock from os_brick import exception from os_brick.initiator.connectors import storpool as connector from os_brick.initiator import storpool_utils from os_brick.tests.initiator import test_connector from os_brick.tests.initiator import test_storpool_utils def volumeNameExt(vid): return 'os--volume-{id}'.format(id=vid) def faulty_api(req): faulty_api.real_fn(req) if faulty_api.fail_count > 0: faulty_api.fail_count -= 1 raise storpool_utils.StorPoolAPIError( 500, { 'error': { 'name': 'busy', 'descr': "'os--volume--sp-vol-1' is open at client 19" } }) class StorPoolConnectorTestCase(test_connector.ConnectorTestCase): def volumeName(self, vid): return volumeNameExt(vid) def get_fake_size(self): return self.fakeSize def execute(self, *cmd, **kwargs): if cmd[0] == 'blockdev': self.assertEqual(len(cmd), 3) self.assertEqual(cmd[1], '--getsize64') self.assertEqual(cmd[2], '/dev/storpool/' + self.volumeName(self.fakeProp['volume'])) return (str(self.get_fake_size()) + '\n', None) raise Exception("Unrecognized command passed to " + type(self).__name__ + ".execute(): " + str.join(", ", map(lambda s: "'" + s + "'", cmd))) def setUp(self): super(StorPoolConnectorTestCase, self).setUp() self.fakeProp = { 'volume': 'sp-vol-1', 'client_id': 1, 'access_mode': 'rw' } self.fakeDeviceInfo = { 'path': '/dev/storpool/' + 'os--volume-' + 'sp-vol-1' } self.fakeGlobalId = 'OneNiceGlobalId' self.api_calls_retry_max = 10 self.fakeConnection = None self.fakeSize = 1024 * 1024 * 1024 self.reassign_wait_data = {'reassign': [ {'volume': volumeNameExt(self.fakeProp['volume']), 'detach': ['1'], 'force': False}]} with mock.patch( 'os_brick.initiator.storpool_utils.get_conf' ) as get_conf: get_conf.return_value = test_storpool_utils.SP_CONF self.connector = connector.StorPoolConnector( None, execute=self.execute) def test_raise_if_sp_ourid_missing(self): conf_no_sp_ourid = copy.deepcopy(test_storpool_utils.SP_CONF) del conf_no_sp_ourid['SP_OURID'] with mock.patch( 'os_brick.initiator.storpool_utils.get_conf' ) as get_conf: get_conf.return_value = conf_no_sp_ourid self.assertRaises(exception.BrickException, connector.StorPoolConnector, "") def test_connect_volume(self): volume_name = volumeNameExt(self.fakeProp['volume']) api = mock.MagicMock(spec=['volumes_reassign_wait', 'volume_get_info']) api.volumes_reassign_wait = mock.MagicMock(spec=['__call__']) api.volume_get_info = mock.Mock( return_value={"globalId": self.fakeGlobalId}) reassign_wait_expected = { 'reassign': [ { 'volume': 'os--volume-sp-vol-1', 'rw': ['1'] } ] } with mock.patch.object(self.connector, attribute='_sp_api', new=api): conn = self.connector.connect_volume(self.fakeProp) self.assertIn('type', conn) self.assertIn('path', conn) self.assertEqual(conn['path'], '/dev/storpool-byid/' + self.fakeGlobalId) self.assertEqual(len(api.volumes_reassign_wait.mock_calls), 1) self.assertEqual(api.volumes_reassign_wait.mock_calls[0], mock.call(reassign_wait_expected)) self.assertEqual(len(api.volume_get_info.mock_calls), 1) self.assertEqual(api.volume_get_info.mock_calls[0], mock.call(volume_name)) self.assertEqual(self.connector.get_search_path(), '/dev/storpool') paths = self.connector.get_volume_paths(self.fakeProp) self.assertEqual(len(paths), 1) self.assertEqual(paths[0], "/dev/storpool/" + self.volumeName(self.fakeProp['volume'])) self.fakeConnection = conn def test_disconnect_volume(self): if self.fakeConnection is None: self.test_connect_volume() api = mock.MagicMock(spec=['volumes_reassign_wait']) api.volumes_reassign_wait = 
mock.MagicMock(spec=['__call__']) with mock.patch.object(self.connector, attribute='_sp_api', new=api): self.connector.disconnect_volume(self.fakeProp, self.fakeDeviceInfo) self.assertEqual(api.volumes_reassign_wait.mock_calls[0], (mock.call(self.reassign_wait_data))) api.volumes_reassign_wait = mock.MagicMock(spec=['__call__']) fake_device_info = copy.deepcopy(self.fakeDeviceInfo) fake_device_info["path"] = \ "/dev/storpool-byid/" \ "byid-paths-map-to-volumes-with-a-tilde-prefix" rwd = copy.deepcopy(self.reassign_wait_data) rwd['reassign'][0]["volume"] =\ "~byid-paths-map-to-"\ "volumes-with-a-tilde-prefix" self.connector.disconnect_volume(self.fakeProp, fake_device_info) self.assertEqual(api.volumes_reassign_wait.mock_calls[0], (mock.call(rwd))) fake_device_info = copy.deepcopy(self.fakeDeviceInfo) del fake_device_info["path"] fake_prop = copy.deepcopy(self.fakeProp) fake_prop["device_path"] = \ "/dev/storpool-byid/" \ "byid-paths-map-to-volumes-with-a-tilde-prefix" rwd = copy.deepcopy(self.reassign_wait_data) rwd['reassign'][0]["volume"] =\ "~byid-paths-map-to-"\ "volumes-with-a-tilde-prefix" self.connector.disconnect_volume(fake_prop, fake_device_info) self.assertEqual(api.volumes_reassign_wait.mock_calls[0], (mock.call(rwd))) fake_device_info = copy.deepcopy(self.fakeDeviceInfo) fake_device_info["path"] = "/dev/invalid" self.assertRaises(exception.BrickException, self.connector.disconnect_volume, self.fakeProp, fake_device_info) fake_device_info = copy.deepcopy(self.fakeDeviceInfo) del fake_device_info["path"] self.assertRaises(exception.BrickException, self.connector.disconnect_volume, self.fakeProp, fake_device_info) self.assertRaises(exception.BrickException, self.connector.disconnect_volume, self.fakeProp, None) def test_connect_exceptions(self): """Raise exceptions on missing connection information""" api = mock.MagicMock(spec=['volumes_reassign_wait', 'volume_get_info']) api.volumes_reassign_wait = mock.MagicMock(spec=['__call__']) api.volume_get_info = mock.MagicMock(spec=['__call__']) with mock.patch.object(self.connector, attribute='_sp_api', new=api): for key in ['volume', 'client_id', 'access_mode']: fake_prop = copy.deepcopy(self.fakeProp) del fake_prop[key] self.assertRaises(exception.BrickException, self.connector.connect_volume, fake_prop) fake_prop = copy.deepcopy(self.fakeProp) del fake_prop['client_id'] self.assertRaises(exception.BrickException, self.connector.disconnect_volume, fake_prop, self.fakeDeviceInfo) fake_device_info = copy.deepcopy(self.fakeDeviceInfo) del fake_device_info['path'] self.assertRaises(exception.BrickException, self.connector.disconnect_volume, self.fakeProp, fake_device_info) def test_sp_ourid_exceptions(self): """Raise exceptions on missing SP_OURID""" conf_no_sp_ourid = copy.deepcopy(test_storpool_utils.SP_CONF) del conf_no_sp_ourid['SP_OURID'] with mock.patch( 'os_brick.initiator.storpool_utils.get_conf' ) as get_conf: conf_no_sp_ourid = copy.deepcopy(test_storpool_utils.SP_CONF) del conf_no_sp_ourid['SP_OURID'] get_conf.return_value = conf_no_sp_ourid self.assertRaises(exception.BrickException, self.connector.connect_volume, self.fakeProp) self.assertRaises(exception.BrickException, self.connector.disconnect_volume, self.fakeProp, self.fakeDeviceInfo) def test_sp_api_exceptions(self): """Handle SP API exceptions""" api = mock.MagicMock(spec=['volumes_reassign_wait', 'volume_get_info']) api.volumes_reassign_wait = mock.MagicMock(spec=['__call__']) # The generic exception should bypass the SP API exception handling 
api.volumes_reassign_wait.side_effect = Exception() api.volume_get_info = mock.MagicMock(spec=['__call__']) with mock.patch.object(self.connector, attribute='_sp_api', new=api): self.assertRaises(exception.BrickException, self.connector.connect_volume, self.fakeProp) self.assertRaises(exception.BrickException, self.connector.disconnect_volume, self.fakeProp, self.fakeDeviceInfo) api.volumes_reassign_wait.side_effect = "" api.volume_get_info = Exception() with mock.patch.object(self.connector, attribute='_sp_api', new=api): self.assertRaises(exception.BrickException, self.connector.connect_volume, self.fakeProp) self.assertRaises(exception.BrickException, self.connector.disconnect_volume, self.fakeProp, self.fakeDeviceInfo) # Test the retry logic def init_mock_api(retries): faulty_api.fail_count = retries faulty_api.real_fn = mock.MagicMock(spec=['__call__']) api.volumes_reassign_wait = faulty_api api.volume_get_info = mock.MagicMock(spec=['__call__']) init_mock_api(self.api_calls_retry_max - 1) with mock.patch.object(self.connector, attribute='_sp_api', new=api): self.connector.disconnect_volume(self.fakeProp, self.fakeDeviceInfo) self.assertEqual(self.api_calls_retry_max, len(faulty_api.real_fn.mock_calls)) for mock_call in faulty_api.real_fn.mock_calls: self.assertEqual(mock_call, mock.call(self.reassign_wait_data)) init_mock_api(self.api_calls_retry_max) with mock.patch.object(self.connector, attribute='_sp_api', new=api): rwd = copy.deepcopy(self.reassign_wait_data) self.connector.disconnect_volume(self.fakeProp, self.fakeDeviceInfo) self.assertEqual(self.api_calls_retry_max + 1, len(faulty_api.real_fn.mock_calls)) for mock_call in faulty_api.real_fn.mock_calls[:-1]: self.assertEqual(mock_call, mock.call(rwd)) rwd['reassign'][0]['force'] = True self.assertEqual(faulty_api.real_fn.mock_calls[-1], mock.call(rwd)) init_mock_api(self.api_calls_retry_max + 1) with mock.patch.object(self.connector, attribute='_sp_api', new=api): rwd = copy.deepcopy(self.reassign_wait_data) self.assertRaises(exception.BrickException, self.connector.disconnect_volume, self.fakeProp, self.fakeDeviceInfo) self.assertEqual(self.api_calls_retry_max + 1, len(faulty_api.real_fn.mock_calls)) for mock_call in faulty_api.real_fn.mock_calls[:-1]: self.assertEqual(mock_call, mock.call(rwd)) rwd['reassign'][0]['force'] = True self.assertEqual(faulty_api.real_fn.mock_calls[-1], mock.call(rwd)) def test_extend_volume(self): if self.fakeConnection is None: self.test_connect_volume() self.fakeSize += 1024 * 1024 * 1024 size_list = [self.fakeSize, self.fakeSize - 1, self.fakeSize - 2] vdata_list = [[{'size': self.fakeSize}]] def fake_volume_list(name): self.assertEqual(name, volumeNameExt(self.fakeProp['volume'])) return vdata_list.pop() api = mock.MagicMock(spec=['volume']) api.volume = mock.MagicMock(spec=['__call__']) api.volume.side_effect = fake_volume_list with mock.patch.object( self.connector, attribute='_sp_api', new=api ), mock.patch.object( self, attribute='get_fake_size', spec=['__call__'] ) as fake_size, mock.patch('time.sleep') as fake_sleep: fake_size.side_effect = size_list.pop newSize = self.connector.extend_volume(self.fakeProp) self.assertEqual(api.volume.call_count, 1) self.assertListEqual(vdata_list, []) self.assertEqual(fake_size.call_count, 3) self.assertListEqual(size_list, []) self.assertEqual(fake_sleep.call_count, 2) self.assertEqual(newSize, self.fakeSize) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 
os_brick-6.11.0/os_brick/tests/initiator/connectors/test_vmware.py0000664000175000017500000004352400000000000025453 0ustar00zuulzuul00000000000000# Copyright (c) 2016 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_utils import units from oslo_vmware.objects import datastore from oslo_vmware import vim_util from os_brick import exception from os_brick.initiator.connectors import vmware from os_brick.tests.initiator import test_connector @ddt.ddt class VmdkConnectorTestCase(test_connector.ConnectorTestCase): IP = '127.0.0.1' PORT = 443 USERNAME = 'username' PASSWORD = 'password' API_RETRY_COUNT = 3 TASK_POLL_INTERVAL = 5.0 CA_FILE = "/etc/ssl/rui-ca-cert.pem" TMP_DIR = "/vmware-tmp" IMG_TX_TIMEOUT = 10 VMDK_CONNECTOR = vmware.VmdkConnector def setUp(self): super(VmdkConnectorTestCase, self).setUp() self._connector = vmware.VmdkConnector(None) self._connector._ip = self.IP self._connector._port = self.PORT self._connector._username = self.USERNAME self._connector._password = self.PASSWORD self._connector._api_retry_count = self.API_RETRY_COUNT self._connector._task_poll_interval = self.TASK_POLL_INTERVAL self._connector._ca_file = self.CA_FILE self._connector._insecure = True self._connector._tmp_dir = self.TMP_DIR self._connector._timeout = self.IMG_TX_TIMEOUT def test_load_config(self): config = { 'vmware_host_ip': 'localhost', 'vmware_host_port': 1234, 'vmware_host_username': 'root', 'vmware_host_password': 'pswd', 'vmware_api_retry_count': 1, 'vmware_task_poll_interval': 1.0, 'vmware_ca_file': None, 'vmware_insecure': False, 'vmware_tmp_dir': '/tmp', 'vmware_image_transfer_timeout_secs': 5, } self._connector._load_config({'config': config}) self.assertEqual('localhost', self._connector._ip) self.assertEqual(1234, self._connector._port) self.assertEqual('root', self._connector._username) self.assertEqual('pswd', self._connector._password) self.assertEqual(1, self._connector._api_retry_count) self.assertEqual(1.0, self._connector._task_poll_interval) self.assertIsNone(self._connector._ca_file) self.assertFalse(self._connector._insecure) self.assertEqual('/tmp', self._connector._tmp_dir) self.assertEqual(5, self._connector._timeout) @mock.patch('oslo_vmware.api.VMwareAPISession') def test_create_session(self, session): session.return_value = mock.sentinel.session ret = self._connector._create_session() self.assertEqual(mock.sentinel.session, ret) session.assert_called_once_with( self._connector._ip, self._connector._username, self._connector._password, self._connector._api_retry_count, self._connector._task_poll_interval, port=self._connector._port, cacert=self._connector._ca_file, insecure=self._connector._insecure) @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch('tempfile.mkstemp') @mock.patch('os.close') def test_create_temp_file( self, close, mkstemp, ensure_tree): fd = mock.sentinel.fd tmp = mock.sentinel.tmp mkstemp.return_value = (fd, tmp) prefix = ".vmdk" suffix = "test" ret = 
self._connector._create_temp_file(prefix=prefix, suffix=suffix) self.assertEqual(tmp, ret) ensure_tree.assert_called_once_with(self._connector._tmp_dir) mkstemp.assert_called_once_with(dir=self._connector._tmp_dir, prefix=prefix, suffix=suffix) close.assert_called_once_with(fd) @mock.patch('os_brick.initiator.connectors.vmware.open', create=True) @mock.patch('oslo_vmware.image_transfer.copy_stream_optimized_disk') def test_download_vmdk(self, copy_disk, file_open): file_open_ret = mock.Mock() tmp_file = mock.sentinel.tmp_file file_open_ret.__enter__ = mock.Mock(return_value=tmp_file) file_open_ret.__exit__ = mock.Mock(return_value=None) file_open.return_value = file_open_ret tmp_file_path = mock.sentinel.tmp_file_path session = mock.sentinel.session backing = mock.sentinel.backing vmdk_path = mock.sentinel.vmdk_path vmdk_size = mock.sentinel.vmdk_size self._connector._download_vmdk( tmp_file_path, session, backing, vmdk_path, vmdk_size) file_open.assert_called_once_with(tmp_file_path, 'wb') copy_disk.assert_called_once_with(None, self._connector._timeout, tmp_file, session=session, host=self._connector._ip, port=self._connector._port, vm=backing, vmdk_file_path=vmdk_path, vmdk_size=vmdk_size) def _create_connection_properties(self): return {'volume_id': 'ed083474-d325-4a99-b301-269111654f0d', 'volume': 'ref-1', 'vmdk_path': '[ds] foo/bar.vmdk', 'vmdk_size': units.Gi, 'datastore': 'ds-1', 'datacenter': 'dc-1', } @mock.patch.object(VMDK_CONNECTOR, '_load_config') @mock.patch.object(VMDK_CONNECTOR, '_create_session') @mock.patch.object(VMDK_CONNECTOR, '_create_temp_file') @mock.patch('oslo_vmware.vim_util.get_moref') @mock.patch.object(VMDK_CONNECTOR, '_download_vmdk') @mock.patch('os.path.getmtime') def test_connect_volume( self, getmtime, download_vmdk, get_moref, create_temp_file, create_session, load_config): session = mock.Mock() create_session.return_value = session tmp_file_path = mock.sentinel.tmp_file_path create_temp_file.return_value = tmp_file_path backing = mock.sentinel.backing get_moref.return_value = backing last_modified = mock.sentinel.last_modified getmtime.return_value = last_modified props = self._create_connection_properties() ret = self._connector.connect_volume(props) self.assertEqual(tmp_file_path, ret['path']) self.assertEqual(last_modified, ret['last_modified']) load_config.assert_called_once_with(props) create_session.assert_called_once_with() create_temp_file.assert_called_once_with( suffix=".vmdk", prefix=props['volume_id']) download_vmdk.assert_called_once_with( tmp_file_path, session, backing, props['vmdk_path'], props['vmdk_size']) session.logout.assert_called_once_with() @ddt.data((None, False), ([mock.sentinel.snap], True)) @ddt.unpack def test_snapshot_exists(self, snap_list, exp_return_value): snapshot = mock.Mock(rootSnapshotList=snap_list) session = mock.Mock() session.invoke_api.return_value = snapshot backing = mock.sentinel.backing ret = self._connector._snapshot_exists(session, backing) self.assertEqual(exp_return_value, ret) session.invoke_api.assert_called_once_with( vim_util, 'get_object_property', session.vim, backing, 'snapshot') def test_create_temp_ds_folder(self): session = mock.Mock() ds_folder_path = mock.sentinel.ds_folder_path dc_ref = mock.sentinel.dc_ref self._connector._create_temp_ds_folder(session, ds_folder_path, dc_ref) session.invoke_api.assert_called_once_with( session.vim, 'MakeDirectory', session.vim.service_content.fileManager, name=ds_folder_path, datacenter=dc_ref) 
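    # The test below pins down the write-back sequence _disconnect is expected
    # to perform: (1) create a temp folder on the datastore, (2) stream the
    # local temp .vmdk into it with _upload_vmdk, (3) detach the original disk
    # from the backing VM, (4) CopyVirtualDisk_Task from the uploaded file
    # over the original vmdk_path, (5) re-attach the disk, and (6)
    # DeleteDatastoreFile_Task on the uploaded temp copy.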
@mock.patch('oslo_vmware.objects.datastore.get_datastore_by_ref') @mock.patch.object(VMDK_CONNECTOR, '_create_temp_ds_folder') @mock.patch('os_brick.initiator.connectors.vmware.open', create=True) @mock.patch.object(VMDK_CONNECTOR, '_upload_vmdk') @mock.patch('os.path.getsize') @mock.patch.object(VMDK_CONNECTOR, '_get_disk_device') @mock.patch.object(VMDK_CONNECTOR, '_detach_disk_from_backing') @mock.patch.object(VMDK_CONNECTOR, '_attach_disk_to_backing') def test_disconnect( self, attach_disk_to_backing, detach_disk_from_backing, get_disk_device, getsize, upload_vmdk, file_open, create_temp_ds_folder, get_ds_by_ref): ds_ref = mock.sentinel.ds_ref ds_name = 'datastore-1' dstore = datastore.Datastore(ds_ref, ds_name) get_ds_by_ref.return_value = dstore file_open_ret = mock.Mock() tmp_file = mock.sentinel.tmp_file file_open_ret.__enter__ = mock.Mock(return_value=tmp_file) file_open_ret.__exit__ = mock.Mock(return_value=None) file_open.return_value = file_open_ret dc_name = mock.sentinel.dc_name copy_task = mock.sentinel.copy_vdisk_task delete_file_task = mock.sentinel.delete_file_task session = mock.Mock() session.invoke_api.side_effect = [dc_name, copy_task, delete_file_task] getsize.return_value = units.Gi disk_device = mock.sentinel.disk_device get_disk_device.return_value = disk_device backing = mock.sentinel.backing tmp_file_path = '/tmp/foo.vmdk' dc_ref = mock.sentinel.dc_ref vmdk_path = mock.sentinel.vmdk_path self._connector._disconnect( backing, tmp_file_path, session, ds_ref, dc_ref, vmdk_path) tmp_folder_path = self._connector.TMP_IMAGES_DATASTORE_FOLDER_PATH ds_folder_path = '[%s] %s' % (ds_name, tmp_folder_path) create_temp_ds_folder.assert_called_once_with( session, ds_folder_path, dc_ref) file_open.assert_called_once_with(tmp_file_path, "rb") self.assertEqual( mock.call(vim_util, 'get_object_property', session.vim, dc_ref, 'name'), session.invoke_api.call_args_list[0]) exp_rel_path = '%s/foo.vmdk' % tmp_folder_path upload_vmdk.assert_called_once_with( tmp_file, self._connector._ip, self._connector._port, dc_name, ds_name, session.vim.client.cookiejar, exp_rel_path, units.Gi, self._connector._ca_file, self._connector._timeout) get_disk_device.assert_called_once_with(session, backing) detach_disk_from_backing.assert_called_once_with( session, backing, disk_device) src = '[%s] %s' % (ds_name, exp_rel_path) disk_mgr = session.vim.service_content.virtualDiskManager self.assertEqual( mock.call(session.vim, 'CopyVirtualDisk_Task', disk_mgr, sourceName=src, sourceDatacenter=dc_ref, destName=vmdk_path, destDatacenter=dc_ref), session.invoke_api.call_args_list[1]) self.assertEqual(mock.call(copy_task), session.wait_for_task.call_args_list[0]) attach_disk_to_backing.assert_called_once_with( session, backing, disk_device) file_mgr = session.vim.service_content.fileManager self.assertEqual( mock.call(session.vim, 'DeleteDatastoreFile_Task', file_mgr, name=src, datacenter=dc_ref), session.invoke_api.call_args_list[2]) self.assertEqual(mock.call(delete_file_task), session.wait_for_task.call_args_list[1]) @mock.patch('os.path.exists') def test_disconnect_volume_with_missing_temp_file(self, path_exists): path_exists.return_value = False path = mock.sentinel.path self.assertRaises(exception.NotFound, self._connector.disconnect_volume, mock.ANY, {'path': path}) path_exists.assert_called_once_with(path) @mock.patch('os.path.exists') @mock.patch('os.path.getmtime') @mock.patch.object(VMDK_CONNECTOR, '_disconnect') @mock.patch('os.remove') def test_disconnect_volume_with_unmodified_file( self, 
remove, disconnect, getmtime, path_exists): path_exists.return_value = True mtime = 1467802060 getmtime.return_value = mtime path = mock.sentinel.path self._connector.disconnect_volume(mock.ANY, {'path': path, 'last_modified': mtime}) path_exists.assert_called_once_with(path) getmtime.assert_called_once_with(path) disconnect.assert_not_called() remove.assert_called_once_with(path) @mock.patch('os.path.exists') @mock.patch('os.path.getmtime') @mock.patch.object(VMDK_CONNECTOR, '_load_config') @mock.patch.object(VMDK_CONNECTOR, '_create_session') @mock.patch('oslo_vmware.vim_util.get_moref') @mock.patch.object(VMDK_CONNECTOR, '_snapshot_exists') @mock.patch.object(VMDK_CONNECTOR, '_disconnect') @mock.patch('os.remove') def test_disconnect_volume( self, remove, disconnect, snapshot_exists, get_moref, create_session, load_config, getmtime, path_exists): path_exists.return_value = True mtime = 1467802060 getmtime.return_value = mtime session = mock.Mock() create_session.return_value = session snapshot_exists.return_value = False backing = mock.sentinel.backing ds_ref = mock.sentinel.ds_ref dc_ref = mock.sentinel.dc_ref get_moref.side_effect = [backing, ds_ref, dc_ref] props = self._create_connection_properties() path = mock.sentinel.path self._connector.disconnect_volume(props, {'path': path, 'last_modified': mtime - 1}) path_exists.assert_called_once_with(path) getmtime.assert_called_once_with(path) load_config.assert_called_once_with(props) create_session.assert_called_once_with() snapshot_exists.assert_called_once_with(session, backing) disconnect.assert_called_once_with( backing, path, session, ds_ref, dc_ref, props['vmdk_path']) remove.assert_called_once_with(path) session.logout.assert_called_once_with() def test_get_disk_device(self): disk_device = mock.Mock() disk_device.__class__.__name__ = 'VirtualDisk' controller_device = mock.Mock() controller_device.__class__.__name__ = 'VirtualLSILogicController' devices = mock.Mock() devices.__class__.__name__ = "ArrayOfVirtualDevice" devices.VirtualDevice = [disk_device, controller_device] session = mock.Mock() session.invoke_api.return_value = devices backing = mock.sentinel.backing self.assertEqual(disk_device, self._connector._get_disk_device(session, backing)) session.invoke_api.assert_called_once_with( vim_util, 'get_object_property', session.vim, backing, 'config.hardware.device') def test_create_spec_for_disk_remove(self): disk_spec = mock.Mock() session = mock.Mock() session.vim.client.factory.create.return_value = disk_spec disk_device = mock.sentinel.disk_device self._connector._create_spec_for_disk_remove(session, disk_device) session.vim.client.factory.create.assert_called_once_with( 'ns0:VirtualDeviceConfigSpec') self.assertEqual('remove', disk_spec.operation) self.assertEqual('destroy', disk_spec.fileOperation) self.assertEqual(disk_device, disk_spec.device) @mock.patch.object(VMDK_CONNECTOR, '_create_spec_for_disk_remove') @mock.patch.object(VMDK_CONNECTOR, '_reconfigure_backing') def test_detach_disk_from_backing(self, reconfigure_backing, create_spec): disk_spec = mock.sentinel.disk_spec create_spec.return_value = disk_spec reconfig_spec = mock.Mock() session = mock.Mock() session.vim.client.factory.create.return_value = reconfig_spec backing = mock.sentinel.backing disk_device = mock.sentinel.disk_device self._connector._detach_disk_from_backing( session, backing, disk_device) create_spec.assert_called_once_with(session, disk_device) session.vim.client.factory.create.assert_called_once_with( 'ns0:VirtualMachineConfigSpec') 
self.assertEqual([disk_spec], reconfig_spec.deviceChange) reconfigure_backing.assert_called_once_with( session, backing, reconfig_spec) @mock.patch.object(VMDK_CONNECTOR, '_reconfigure_backing') def test_attach_disk_to_backing(self, reconfigure_backing): reconfig_spec = mock.Mock() disk_spec = mock.Mock() session = mock.Mock() session.vim.client.factory.create.side_effect = [ reconfig_spec, disk_spec] backing = mock.Mock() disk_device = mock.sentinel.disk_device self._connector._attach_disk_to_backing(session, backing, disk_device) self.assertEqual([disk_spec], reconfig_spec.deviceChange) self.assertEqual('add', disk_spec.operation) self.assertEqual(disk_device, disk_spec.device) reconfigure_backing.assert_called_once_with( session, backing, reconfig_spec) os_brick-6.11.0/os_brick/tests/initiator/test_connector.py # (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import platform import sys from unittest import mock import ddt from oslo_concurrency import processutils as putils from oslo_service import loopingcall from os_brick import exception from os_brick.initiator import connector from os_brick.initiator.connectors import base from os_brick.initiator.connectors import fake from os_brick.initiator.connectors import iscsi from os_brick.initiator.connectors import nvmeof from os_brick.initiator import linuxfc from os_brick.privileged import nvmeof as priv_nvmeof from os_brick.privileged import rootwrap as priv_rootwrap from os_brick.tests import base as test_base from os_brick import utils MY_IP = '10.0.0.1' FAKE_SCSI_WWN = '1234567890' class ZeroIntervalLoopingCall(loopingcall.FixedIntervalLoopingCall): def start(self, interval, initial_delay=None, stop_on_exception=True): return super(ZeroIntervalLoopingCall, self).start( 0, 0, stop_on_exception) class ConnectorUtilsTestCase(test_base.TestCase): @mock.patch.object(nvmeof.NVMeOFConnector, '_is_native_multipath_supported', return_value=False) @mock.patch.object(priv_nvmeof, 'get_system_uuid', return_value=None) @mock.patch.object(nvmeof.NVMeOFConnector, '_get_host_uuid', return_value=None) @mock.patch.object(utils, 'get_host_nqn', return_value=None) @mock.patch.object(iscsi.ISCSIConnector, 'get_initiator', return_value='fakeinitiator') @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_wwpns', return_value=None) @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_wwnns', return_value=None) @mock.patch.object(platform, 'machine', mock.Mock(return_value='s390x')) @mock.patch('sys.platform', 'linux2') @mock.patch.object(utils, 'get_nvme_host_id', mock.Mock(return_value=None)) def _test_brick_get_connector_properties(self, multipath, enforce_multipath, multipath_result, mock_wwnns, mock_wwpns, mock_initiator, mock_nqn, mock_hostuuid, mock_sysuuid, mock_native_multipath_supported, host='fakehost'): props_actual
= connector.get_connector_properties('sudo', MY_IP, multipath, enforce_multipath, host=host) os_type = 'linux2' platform = 's390x' props = {'initiator': 'fakeinitiator', 'host': host, 'ip': MY_IP, 'multipath': multipath_result, 'enforce_multipath': enforce_multipath, 'nvme_native_multipath': False, 'os_type': os_type, 'platform': platform, 'do_local_attach': False} self.assertEqual(props, props_actual) def test_brick_get_connector_properties_connectors_called(self): """Make sure every connector is called.""" mock_list = [] # Make sure every connector is called for item in connector._get_connector_list(): patched = mock.MagicMock() patched.platform = platform.machine() patched.os_type = sys.platform patched.__name__ = item patched.get_connector_properties.return_value = {} patcher = mock.patch(item, new=patched) patcher.start() self.addCleanup(patcher.stop) mock_list.append(patched) connector.get_connector_properties('sudo', MY_IP, True, True) for item in mock_list: assert item.get_connector_properties.called def test_brick_get_connector_properties(self): self._test_brick_get_connector_properties(False, False, False) @mock.patch.object(priv_rootwrap, 'custom_execute', side_effect=OSError(2)) def test_brick_get_connector_properties_multipath(self, mock_custom_execute): self._test_brick_get_connector_properties(True, True, True) mock_custom_execute.assert_called_once_with('nvme', 'version') @mock.patch.object(priv_rootwrap, 'custom_execute', side_effect=OSError(2)) def test_brick_get_connector_properties_fallback(self, mock_custom_execute): self._test_brick_get_connector_properties(True, False, True) mock_custom_execute.assert_called_once_with('nvme', 'version') def test_brick_connector_properties_override_hostname(self): override_host = 'myhostname' self._test_brick_get_connector_properties(False, False, False, host=override_host) @ddt.ddt class ConnectorTestCase(test_base.TestCase): def setUp(self): super(ConnectorTestCase, self).setUp() self.cmds = [] self.mock_object(loopingcall, 'FixedIntervalLoopingCall', ZeroIntervalLoopingCall) def fake_execute(self, *cmd, **kwargs): self.cmds.append(" ".join(cmd)) return "", None def fake_connection(self): return { 'driver_volume_type': 'fake', 'data': { 'volume_id': 'fake_volume_id', 'target_portal': 'fake_location', 'target_iqn': 'fake_iqn', 'target_lun': 1, } } def test_connect_volume(self): self.connector = fake.FakeConnector(None) device_info = self.connector.connect_volume(self.fake_connection()) self.assertIn('type', device_info) self.assertIn('path', device_info) def test_disconnect_volume(self): self.connector = fake.FakeConnector(None) def test_get_connector_properties(self): with mock.patch.object(priv_rootwrap, 'execute') as mock_exec: mock_exec.return_value = ('', '') multipath = True enforce_multipath = True props = base.BaseLinuxConnector.get_connector_properties( 'sudo', multipath=multipath, enforce_multipath=enforce_multipath) expected_props = { 'multipath': multipath, 'enforce_multipath': enforce_multipath, } self.assertEqual(expected_props, props) multipath = False enforce_multipath = True props = base.BaseLinuxConnector.get_connector_properties( 'sudo', multipath=multipath, enforce_multipath=enforce_multipath) expected_props = { 'multipath': multipath, 'enforce_multipath': enforce_multipath, } self.assertEqual(expected_props, props) @mock.patch('sys.platform', 'win32') def test_get_connector_mapping_win32(self): mapping_win32 = connector.get_connector_mapping() self.assertIn('ISCSI', mapping_win32) self.assertIn('RBD', 
mapping_win32) self.assertNotIn('STORPOOL', mapping_win32) @mock.patch('os_brick.initiator.connector.platform.machine') def test_get_connector_mapping(self, mock_platform_machine): mock_platform_machine.return_value = 'x86_64' mapping_x86 = connector.get_connector_mapping() mock_platform_machine.return_value = 'ppc64le' mapping_ppc = connector.get_connector_mapping() self.assertNotEqual(mapping_x86, mapping_ppc) mock_platform_machine.return_value = 's390x' mapping_s390 = connector.get_connector_mapping() self.assertNotEqual(mapping_x86, mapping_s390) self.assertNotEqual(mapping_ppc, mapping_s390) def test_factory(self): obj = connector.InitiatorConnector.factory('iscsi', None) self.assertEqual("ISCSIConnector", obj.__class__.__name__) obj = connector.InitiatorConnector.factory('iscsi', None, arch='ppc64le') self.assertEqual("ISCSIConnector", obj.__class__.__name__) obj = connector.InitiatorConnector.factory('fibre_channel', None, arch='x86_64') self.assertEqual("FibreChannelConnector", obj.__class__.__name__) obj = connector.InitiatorConnector.factory('fibre_channel', None, arch='s390x') self.assertEqual("FibreChannelConnectorS390X", obj.__class__.__name__) obj = connector.InitiatorConnector.factory( 'nfs', None, nfs_mount_point_base='/mnt/test') self.assertEqual("RemoteFsConnector", obj.__class__.__name__) obj = connector.InitiatorConnector.factory( 'glusterfs', None, glusterfs_mount_point_base='/mnt/test', arch='x86_64') self.assertEqual("RemoteFsConnector", obj.__class__.__name__) obj = connector.InitiatorConnector.factory( 'scality', None, scality_mount_point_base='/mnt/test', arch='x86_64') self.assertEqual("RemoteFsConnector", obj.__class__.__name__) obj = connector.InitiatorConnector.factory('local', None) self.assertEqual("LocalConnector", obj.__class__.__name__) obj = connector.InitiatorConnector.factory('gpfs', None) self.assertEqual("GPFSConnector", obj.__class__.__name__) obj = connector.InitiatorConnector.factory( 'huaweisdshypervisor', None, arch='x86_64') self.assertEqual("HuaweiStorHyperConnector", obj.__class__.__name__) obj = connector.InitiatorConnector.factory( "scaleio", None, arch='x86_64') self.assertEqual("ScaleIOConnector", obj.__class__.__name__) obj = connector.InitiatorConnector.factory( 'quobyte', None, quobyte_mount_point_base='/mnt/test', arch='x86_64') self.assertEqual("RemoteFsConnector", obj.__class__.__name__) self.assertRaises(exception.InvalidConnectorProtocol, connector.InitiatorConnector.factory, "bogus", None) def test_check_valid_device_with_wrong_path(self): self.connector = fake.FakeConnector(None) self.connector._execute = \ lambda *args, **kwargs: ("", None) self.assertFalse(self.connector.check_valid_device('/d0v')) def test_check_valid_device(self): self.connector = fake.FakeConnector(None) self.connector._execute = \ lambda *args, **kwargs: ("", "") self.assertTrue(self.connector.check_valid_device('/dev')) def test_check_valid_device_with_cmd_error(self): def raise_except(*args, **kwargs): raise putils.ProcessExecutionError self.connector = fake.FakeConnector(None) with mock.patch.object(self.connector, '_execute', side_effect=putils.ProcessExecutionError): self.assertFalse(self.connector.check_valid_device('/dev')) @ddt.data( (False, False, False), (False, False, True), (False, True, False), (False, True, True), (True, False, False), (True, False, True), (True, True, False), (True, True, True), ) @ddt.unpack @mock.patch.object(base, 'LOG') @mock.patch.object(base.BaseLinuxConnector, 'supports_multipath') def test_check_multipath( self, 
mpath, enforce_mpath, supports_mpath, mock_supports_mpath, mock_log): self.connector = fake.FakeConnector(None) fake_conn_props = self.fake_connection() self.connector.use_multipath = mpath mock_supports_mpath.return_value = supports_mpath fake_conn_props['data']['enforce_multipath'] = enforce_mpath if mpath and enforce_mpath and not supports_mpath: self.assertRaises( exception.BrickException, self.connector.check_multipath, fake_conn_props['data'] ) else: self.connector.check_multipath(fake_conn_props['data']) if mpath and not enforce_mpath and not supports_mpath: mock_log.warning.assert_called_once_with( "Multipathing is requested but the host " "doesn't support multipathing." ) mock_supports_mpath.assert_called_once_with() os_brick-6.11.0/os_brick/tests/initiator/test_host_driver.py # Copyright (c) 2015 Scality # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno from unittest import mock from os_brick.initiator import host_driver from os_brick.tests import base class HostDriverTestCase(base.TestCase): def test_get_all_block_devices(self): fake_dev = ['device1', 'device2'] expected = ['/dev/disk/by-path/' + dev for dev in fake_dev] driver = host_driver.HostDriver() with mock.patch('os.listdir', return_value=fake_dev): actual = driver.get_all_block_devices() self.assertEqual(expected, actual) def test_get_all_block_devices_when_oserror_is_enoent(self): driver = host_driver.HostDriver() oserror = OSError(errno.ENOENT, "") with mock.patch('os.listdir', side_effect=oserror): block_devices = driver.get_all_block_devices() self.assertEqual([], block_devices) def test_get_all_block_devices_when_oserror_is_not_enoent(self): driver = host_driver.HostDriver() oserror = OSError(errno.ENOMEM, "") with mock.patch('os.listdir', side_effect=oserror): self.assertRaises(OSError, driver.get_all_block_devices) os_brick-6.11.0/os_brick/tests/initiator/test_linuxfc.py # (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
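# Editor's illustration (hedged): the @ddt.data/@ddt.unpack pattern used by
# test_check_multipath above enumerates all eight boolean combinations by
# hand. Below is a minimal, self-contained sketch of the same technique;
# 'check' and 'CheckMultipathSketch' are hypothetical names, not part of
# os-brick, and itertools.product merely generates the combinations instead
# of listing them one by one.
import itertools
import unittest

import ddt


def check(mpath, enforce, supported):
    # Mirror the branching exercised above: fail only when multipath is
    # requested and enforced but not supported by the host.
    if mpath and enforce and not supported:
        raise RuntimeError("multipath enforced but not supported")
    return mpath and supported


@ddt.ddt
class CheckMultipathSketch(unittest.TestCase):
    @ddt.data(*itertools.product((True, False), repeat=3))
    @ddt.unpack
    def test_combinations(self, mpath, enforce, supported):
        if mpath and enforce and not supported:
            self.assertRaises(
                RuntimeError, check, mpath, enforce, supported)
        else:
            check(mpath, enforce, supported)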
import os.path from unittest import mock import ddt from os_brick.initiator import linuxfc from os_brick.tests import base @ddt.ddt class LinuxFCTestCase(base.TestCase): def setUp(self): super(LinuxFCTestCase, self).setUp() self.cmds = [] self.mock_object(os.path, 'exists', return_value=True) self.mock_object(os.path, 'isdir', return_value=True) self.lfc = linuxfc.LinuxFibreChannel(None, execute=self.fake_execute) def fake_execute(self, *cmd, **kwargs): self.cmds.append(" ".join(cmd)) return "", None @staticmethod def __get_rescan_info(zone_manager=False): connection_properties = { 'initiator_target_map': {'50014380186af83c': ['514f0c50023f6c00'], '50014380186af83e': ['514f0c50023f6c01']}, 'initiator_target_lun_map': { '50014380186af83c': [('514f0c50023f6c00', 1)], '50014380186af83e': [('514f0c50023f6c01', 1)] }, 'target_discovered': False, 'target_lun': 1, 'target_wwn': ['514f0c50023f6c00', '514f0c50023f6c01'], 'targets': [ ('514f0c50023f6c00', 1), ('514f0c50023f6c01', 1), ] } hbas = [ {'device_path': ('/sys/devices/pci0000:00/0000:00:02.0/' '0000:04:00.0/host6/fc_host/host6'), 'host_device': 'host6', 'node_name': '50014380186af83d', 'port_name': '50014380186af83c'}, {'device_path': ('/sys/devices/pci0000:00/0000:00:02.0/' '0000:04:00.1/host7/fc_host/host7'), 'host_device': 'host7', 'node_name': '50014380186af83f', 'port_name': '50014380186af83e'}, ] if not zone_manager: del connection_properties['initiator_target_map'] del connection_properties['initiator_target_lun_map'] return hbas, connection_properties @staticmethod def _get_expected_info(wwpns=["514f0c50023f6c00", "514f0c50023f6c01"], targets=1, remote_scan=False): execute_results = [] expected_cmds = [] for i in range(0, targets): expected_cmds += [ mock.call(f'grep -Gil "{wwpns[i]}" ' '/sys/class/fc_transport/target6:*/port_name', shell=True) ] if remote_scan: execute_results += [ # We can only perform remote ports scan if the # fc_transport path returns empty output ('', ''), # This is returned from the fc_remote_ports path ('/sys/class/fc_remote_ports/' f'rport-6:0-{i+1}/port_name\n', ''), # noqa: E226 ] expected_cmds += [ mock.call(f'grep -Gil "{wwpns[i]}" ' '/sys/class/fc_remote_ports/rport-6:*/port_name', shell=True), ] else: execute_results += [ ('/sys/class/fc_transport/' f'target6:0:{i+1}/port_name\n', '') # noqa: E226 ] return execute_results, expected_cmds @mock.patch('builtins.open') @ddt.data(True, False) def test__get_hba_channel_scsi_target_lun_single_wwpn( self, remote_scan, mock_open): execute_results, expected_cmds = self._get_expected_info( remote_scan=remote_scan) if remote_scan: mock_open = mock_open.return_value.__enter__.return_value mock_open.read.return_value = ('1\n') hbas, con_props = self.__get_rescan_info() con_props['target_wwn'] = con_props['target_wwn'][0] con_props['targets'] = con_props['targets'][0:1] with mock.patch.object(self.lfc, '_execute', side_effect=execute_results) as execute_mock: res = self.lfc._get_hba_channel_scsi_target_lun(hbas[0], con_props) execute_mock.assert_has_calls(expected_cmds) expected = ([['0', '1', 1]], set()) self.assertEqual(expected, res) @mock.patch('builtins.open') @ddt.data(True, False) def test__get_hba_channel_scsi_target_lun_with_initiator_target_map( self, remote_scan, mock_open): execute_results, expected_cmds = self._get_expected_info( wwpns=["514f0c50023f6c01"]) if remote_scan: mock_open = mock_open.return_value.__enter__.return_value mock_open.read.return_value = ('1\n') hbas, con_props = self.__get_rescan_info(zone_manager=True) con_props['target_wwn'] = 
con_props['target_wwn'][0] con_props['targets'] = con_props['targets'][0:1] hbas[0]['port_name'] = '50014380186af83e' with mock.patch.object(self.lfc, '_execute', side_effect=execute_results) as execute_mock: res = self.lfc._get_hba_channel_scsi_target_lun(hbas[0], con_props) execute_mock.assert_has_calls(expected_cmds) expected = ([['0', '1', 1]], set()) self.assertEqual(expected, res) @mock.patch('builtins.open') @ddt.data(True, False) def test__get_hba_channel_scsi_target_lun_with_initiator_target_map_none( self, remote_scan, mock_open): execute_results, expected_cmds = self._get_expected_info() if remote_scan: mock_open = mock_open.return_value.__enter__.return_value mock_open.read.return_value = ('1\n') hbas, con_props = self.__get_rescan_info() con_props['target_wwn'] = con_props['target_wwn'][0] con_props['targets'] = con_props['targets'][0:1] con_props['initiator_target_map'] = None hbas[0]['port_name'] = '50014380186af83e' with mock.patch.object(self.lfc, '_execute', side_effect=execute_results) as execute_mock: res = self.lfc._get_hba_channel_scsi_target_lun(hbas[0], con_props) execute_mock.assert_has_calls(expected_cmds) expected = ([['0', '1', 1]], set()) self.assertEqual(expected, res) @mock.patch('builtins.open') @ddt.data(True, False) def test__get_hba_channel_scsi_target_lun_multiple_wwpn( self, remote_scan, mock_open): execute_results, expected_cmds = self._get_expected_info(targets=2) if remote_scan: mock_open = mock_open.return_value.__enter__.return_value mock_open.read.return_value = ('1\n') hbas, con_props = self.__get_rescan_info() with mock.patch.object(self.lfc, '_execute', side_effect=execute_results) as execute_mock: res = self.lfc._get_hba_channel_scsi_target_lun(hbas[0], con_props) execute_mock.assert_has_calls(expected_cmds) expected = ([['0', '1', 1], ['0', '2', 1]], set()) self.assertEqual(expected, res) @mock.patch('builtins.open') @ddt.data(True, False) def test__get_hba_channel_scsi_target_lun_multiple_wwpn_and_luns( self, remote_scan, mock_open): execute_results, expected_cmds = self._get_expected_info(targets=2) if remote_scan: mock_open = mock_open.return_value.__enter__.return_value mock_open.read.return_value = ('1\n') hbas, con_props = self.__get_rescan_info() con_props['target_lun'] = [1, 7] con_props['targets'] = [ ('514f0c50023f6c00', 1), ('514f0c50023f6c01', 7), ] with mock.patch.object(self.lfc, '_execute', side_effect=execute_results) as execute_mock: res = self.lfc._get_hba_channel_scsi_target_lun(hbas[0], con_props) execute_mock.assert_has_calls(expected_cmds) expected = ([['0', '1', 1], ['0', '2', 7]], set()) self.assertEqual(expected, res) @mock.patch('builtins.open') @ddt.data(True, False) def test__get_hba_channel_scsi_target_lun_zone_manager( self, remote_scan, mock_open): execute_results, expected_cmds = self._get_expected_info() if remote_scan: mock_open = mock_open.return_value.__enter__.return_value mock_open.read.return_value = ('1\n') hbas, con_props = self.__get_rescan_info(zone_manager=True) with mock.patch.object(self.lfc, '_execute', side_effect=execute_results) as execute_mock: res = self.lfc._get_hba_channel_scsi_target_lun(hbas[0], con_props) execute_mock.assert_has_calls(expected_cmds) expected = ([['0', '1', 1]], set()) self.assertEqual(expected, res) def test__get_hba_channel_scsi_target_lun_both_paths_not_found(self): _, expected_cmds = self._get_expected_info() hbas, con_props = self.__get_rescan_info(zone_manager=True) with mock.patch.object(self.lfc, '_execute', return_value=('', '')) as execute_mock: res = 
self.lfc._get_hba_channel_scsi_target_lun(hbas[0], con_props) execute_mock.assert_has_calls(expected_cmds) self.assertEqual(([], {1}), res) def test__get_hba_channel_scsi_target_lun_exception(self): _, expected_cmds = self._get_expected_info() hbas, con_props = self.__get_rescan_info(zone_manager=True) with mock.patch.object(self.lfc, '_execute', side_effect=Exception) as execute_mock: res = self.lfc._get_hba_channel_scsi_target_lun(hbas[0], con_props) execute_mock.assert_has_calls(expected_cmds) self.assertEqual(([], {1}), res) def test__get_hba_channel_scsi_target_lun_fc_transport_exception(self): execute_effects = [ ('/sys/class/fc_transport/target6:0:1/port_name\n', ''), Exception() ] _, expected_cmds = self._get_expected_info() hbas, con_props = self.__get_rescan_info() with mock.patch.object(self.lfc, '_execute', side_effect=execute_effects) as execute_mock: res = self.lfc._get_hba_channel_scsi_target_lun(hbas[0], con_props) execute_mock.assert_has_calls(expected_cmds) expected = ([['0', '1', 1]], {1}) self.assertEqual(expected, res) @mock.patch('builtins.open') def test__get_hba_channel_scsi_target_lun_fc_remote_ports_exception( self, mock_open): execute_effects = [ ('', ''), ('/sys/class/fc_remote_ports/rport-6:0-1/port_name\n', ''), ('', ''), Exception() ] mock_open = mock_open.return_value.__enter__.return_value mock_open.read.return_value = ('1\n') _, expected_cmds = self._get_expected_info() hbas, con_props = self.__get_rescan_info() with mock.patch.object(self.lfc, '_execute', side_effect=execute_effects) as execute_mock: res = self.lfc._get_hba_channel_scsi_target_lun(hbas[0], con_props) execute_mock.assert_has_calls(expected_cmds) expected = ([['0', '1', 1]], {1}) self.assertEqual(expected, res) @mock.patch('builtins.open') def test__get_hba_channel_scsi_target_open_oserror( self, mock_open): execute_effects, expected_cmds = self._get_expected_info( targets=2, remote_scan=True) mock_open = mock_open.return_value.__enter__.return_value mock_open.read.side_effect = ['1\n', OSError()] hbas, con_props = self.__get_rescan_info() with mock.patch.object(self.lfc, '_execute', side_effect=execute_effects) as execute_mock: res = self.lfc._get_hba_channel_scsi_target_lun(hbas[0], con_props) execute_mock.assert_has_calls(expected_cmds) expected = ([['0', '1', 1]], {1}) self.assertEqual(expected, res) def test__get_target_fc_transport_path(self): path = '/sys/class/fc_transport/target6:' execute_results = ('/sys/class/fc_transport/target6:0:1/port_name\n', '') _, con_props = self.__get_rescan_info() with mock.patch.object(self.lfc, '_execute', return_value=execute_results) as execute_mock: ctl = self.lfc._get_target_fc_transport_path( path, con_props['target_wwn'][0], 1) execute_mock.assert_called_once_with( 'grep -Gil "514f0c50023f6c00" ' '/sys/class/fc_transport/target6:*/port_name', shell=True) self.assertEqual(['0', '1', 1], ctl) @mock.patch('builtins.open') def test__get_target_fc_remote_ports_path(self, mock_open): path = '/sys/class/fc_remote_ports/rport-6:' execute_results = [ ('/sys/class/fc_remote_ports/rport-6:0-1/port_name\n', ''), ('1\n', ''), ] scsi_target_path = ( '/sys/class/fc_remote_ports/rport-6:0-1/scsi_target_id') mock_open.return_value.__enter__.return_value.read.return_value = ( '1\n') hbas, con_props = self.__get_rescan_info() with mock.patch.object(self.lfc, '_execute', side_effect=execute_results) as execute_mock: ctl = self.lfc._get_target_fc_remote_ports_path( path, con_props['target_wwn'][0], 1) expected_cmds = [ mock.call( 'grep -Gil "514f0c50023f6c00" ' 
'/sys/class/fc_remote_ports/rport-6:*/port_name', shell=True), ] execute_mock.assert_has_calls(expected_cmds) mock_open.assert_called_once_with(scsi_target_path) self.assertEqual(['0', '1', 1], ctl) def test_rescan_hosts_initiator_map(self): """Test FC rescan with initiator map and not every HBA connected.""" get_chan_results = [([['2', '3', 1], ['4', '5', 1]], set()), ([['6', '7', 1]], set())] hbas, con_props = self.__get_rescan_info(zone_manager=True) # This HBA is not in the initiator map, so we should not scan it or try # to get the channel and target hbas.append({'device_path': ('/sys/devices/pci0000:00/0000:00:02.0/' '0000:04:00.2/host8/fc_host/host8'), 'host_device': 'host8', 'node_name': '50014380186af83g', 'port_name': '50014380186af83h'}) with mock.patch.object(self.lfc, '_execute', return_value=None) as execute_mock, \ mock.patch.object(self.lfc, '_get_hba_channel_scsi_target_lun', side_effect=get_chan_results) as mock_get_chan: self.lfc.rescan_hosts(hbas, con_props) expected_commands = [ mock.call('tee', '-a', '/sys/class/scsi_host/host6/scan', process_input='2 3 1', root_helper=None, run_as_root=True), mock.call('tee', '-a', '/sys/class/scsi_host/host6/scan', process_input='4 5 1', root_helper=None, run_as_root=True), mock.call('tee', '-a', '/sys/class/scsi_host/host7/scan', process_input='6 7 1', root_helper=None, run_as_root=True)] execute_mock.assert_has_calls(expected_commands) self.assertEqual(len(expected_commands), execute_mock.call_count) expected_calls = [mock.call(hbas[0], con_props), mock.call(hbas[1], con_props)] mock_get_chan.assert_has_calls(expected_calls) @mock.patch.object(linuxfc.LinuxFibreChannel, 'lun_for_addressing') def test_rescan_hosts_single_wwnn(self, lun_addr_mock): """Test FC rescan with no initiator map and single WWNN for ports.""" lun_addr_mock.return_value = 16640 get_chan_results = [ [[['2', '3', 256], ['4', '5', 256]], set()], [[['6', '7', 256]], set()], [[], {1}], ] hbas, con_props = self.__get_rescan_info(zone_manager=False) con_props['addressing_mode'] = 'SAM2' # This HBA is the one that is not included in the single WWNN. 
hbas.append({'device_path': ('/sys/devices/pci0000:00/0000:00:02.0/' '0000:04:00.2/host8/fc_host/host8'), 'host_device': 'host8', 'node_name': '50014380186af83g', 'port_name': '50014380186af83h'}) with mock.patch.object(self.lfc, '_execute', return_value=None) as execute_mock, \ mock.patch.object(self.lfc, '_get_hba_channel_scsi_target_lun', side_effect=get_chan_results) as mock_get_chan: self.lfc.rescan_hosts(hbas, con_props) expected_commands = [ mock.call('tee', '-a', '/sys/class/scsi_host/host6/scan', process_input='2 3 16640', root_helper=None, run_as_root=True), mock.call('tee', '-a', '/sys/class/scsi_host/host6/scan', process_input='4 5 16640', root_helper=None, run_as_root=True), mock.call('tee', '-a', '/sys/class/scsi_host/host7/scan', process_input='6 7 16640', root_helper=None, run_as_root=True)] execute_mock.assert_has_calls(expected_commands) self.assertEqual(len(expected_commands), execute_mock.call_count) expected_calls = [mock.call(hbas[0], con_props), mock.call(hbas[1], con_props)] mock_get_chan.assert_has_calls(expected_calls) lun_addr_mock.assert_has_calls([mock.call(256, 'SAM2')] * 3) def test_rescan_hosts_initiator_map_single_wwnn(self): """Test FC rescan with initiator map and single WWNN.""" get_chan_results = [([['2', '3', 1], ['4', '5', 1]], set()), ([], {1})] hbas, con_props = self.__get_rescan_info(zone_manager=True) with mock.patch.object(self.lfc, '_execute', return_value=None) as execute_mock, \ mock.patch.object(self.lfc, '_get_hba_channel_scsi_target_lun', side_effect=get_chan_results) as mock_get_chan: self.lfc.rescan_hosts(hbas, con_props) expected_commands = [ mock.call('tee', '-a', '/sys/class/scsi_host/host6/scan', process_input='2 3 1', root_helper=None, run_as_root=True), mock.call('tee', '-a', '/sys/class/scsi_host/host6/scan', process_input='4 5 1', root_helper=None, run_as_root=True)] execute_mock.assert_has_calls(expected_commands) self.assertEqual(len(expected_commands), execute_mock.call_count) expected_calls = [mock.call(hbas[0], con_props), mock.call(hbas[1], con_props)] mock_get_chan.assert_has_calls(expected_calls) def test_rescan_hosts_port_not_found(self): """Test when we don't find the target ports.""" get_chan_results = [([], {1}), ([], {1})] hbas, con_props = self.__get_rescan_info(zone_manager=True) # Remove the initiator map con_props.pop('initiator_target_map') con_props.pop('initiator_target_lun_map') with mock.patch.object(self.lfc, '_get_hba_channel_scsi_target_lun', side_effect=get_chan_results), \ mock.patch.object(self.lfc, '_execute', side_effect=None) as execute_mock: self.lfc.rescan_hosts(hbas, con_props) expected_commands = [ mock.call('tee', '-a', '/sys/class/scsi_host/host6/scan', process_input='- - 1', root_helper=None, run_as_root=True), mock.call('tee', '-a', '/sys/class/scsi_host/host7/scan', process_input='- - 1', root_helper=None, run_as_root=True)] execute_mock.assert_has_calls(expected_commands) self.assertEqual(len(expected_commands), execute_mock.call_count) def test_rescan_hosts_port_not_found_driver_disables_wildcards(self): """Test when we don't find the target ports but driver forces scan.""" get_chan_results = [([], {1}), ([], {1})] hbas, con_props = self.__get_rescan_info(zone_manager=True) con_props['enable_wildcard_scan'] = False with mock.patch.object(self.lfc, '_get_hba_channel_scsi_target_lun', side_effect=get_chan_results), \ mock.patch.object(self.lfc, '_execute', side_effect=None) as execute_mock: self.lfc.rescan_hosts(hbas, con_props) execute_mock.assert_not_called() @mock.patch('glob.glob', 
return_value=[]) def test_get_fc_hbas_no_hbas(self, mock_glob): hbas = self.lfc.get_fc_hbas() self.assertListEqual([], hbas) mock_glob.assert_called_once_with('/sys/class/fc_host/*') @mock.patch('os.path.realpath') @mock.patch('glob.glob', return_value=['/sys/class/fc_host/host0', '/sys/class/fc_host/host2']) @mock.patch('builtins.open', side_effect=IOError) def test_get_fc_hbas_fail(self, mock_open, mock_glob, mock_path): hbas = self.lfc.get_fc_hbas() mock_glob.assert_called_once_with('/sys/class/fc_host/*') self.assertListEqual([], hbas) self.assertEqual(2, mock_open.call_count) mock_open.assert_has_calls( (mock.call('/sys/class/fc_host/host0/port_name', 'rt'), mock.call('/sys/class/fc_host/host2/port_name', 'rt')) ) self.assertEqual(2, mock_path.call_count) mock_path.assert_has_calls( (mock.call('/sys/class/fc_host/host0'), mock.call('/sys/class/fc_host/host2')) ) @mock.patch('os.path.realpath') @mock.patch('glob.glob', return_value=['/sys/class/fc_host/host0', '/sys/class/fc_host/host2']) @mock.patch('builtins.open') def test_get_fc_hbas(self, mock_open, mock_glob, mock_path): mock_open.return_value.__enter__.return_value.read.side_effect = [ '0x50014380242b9750\n', '0x50014380242b9751\n', 'Online', '0x50014380242b9752\n', '0x50014380242b9753\n', 'Online', ] pci_path = '/sys/devices/pci0000:20/0000:20:03.0/0000:21:00.' host0_pci = f'{pci_path}0/host0/fc_host/host0' host2_pci = f'{pci_path}1/host2/fc_host/host2' mock_path.side_effect = [host0_pci, host2_pci] hbas = self.lfc.get_fc_hbas() expected = [ {'ClassDevice': 'host0', 'ClassDevicepath': host0_pci, 'port_name': '0x50014380242b9750', 'node_name': '0x50014380242b9751', 'port_state': 'Online'}, {'ClassDevice': 'host2', 'ClassDevicepath': host2_pci, 'port_name': '0x50014380242b9752', 'node_name': '0x50014380242b9753', 'port_state': 'Online'}, ] self.assertListEqual(expected, hbas) mock_glob.assert_called_once_with('/sys/class/fc_host/*') self.assertEqual(6, mock_open.call_count) mock_open.assert_has_calls( (mock.call('/sys/class/fc_host/host0/port_name', 'rt'), mock.call('/sys/class/fc_host/host0/node_name', 'rt'), mock.call('/sys/class/fc_host/host0/port_state', 'rt'), mock.call('/sys/class/fc_host/host2/port_name', 'rt'), mock.call('/sys/class/fc_host/host2/node_name', 'rt'), mock.call('/sys/class/fc_host/host2/port_state', 'rt')), any_order=True, ) self.assertEqual(2, mock_path.call_count) mock_path.assert_has_calls( (mock.call('/sys/class/fc_host/host0'), mock.call('/sys/class/fc_host/host2')) ) def _set_get_fc_hbas(self): pci_path = '/sys/devices/pci0000:20/0000:20:03.0/0000:21:00.' 
host0_pci = f'{pci_path}0/host0/fc_host/host0' host2_pci = f'{pci_path}1/host2/fc_host/host2' return_value = [{'ClassDevice': 'host0', 'ClassDevicepath': host0_pci, 'port_name': '0x50014380242b9750', 'node_name': '0x50014380242b9751', 'port_state': 'Online'}, {'ClassDevice': 'host2', 'ClassDevicepath': host2_pci, 'port_name': '0x50014380242b9752', 'node_name': '0x50014380242b9753', 'port_state': 'Online'}] mocked = self.mock_object(linuxfc.LinuxFibreChannel, 'get_fc_hbas', return_value=return_value) return mocked def test_get_fc_hbas_info(self): mock_hbas = self._set_get_fc_hbas() hbas_info = self.lfc.get_fc_hbas_info() expected_info = [{'device_path': '/sys/devices/pci0000:20/' '0000:20:03.0/0000:21:00.0/' 'host0/fc_host/host0', 'host_device': 'host0', 'node_name': '50014380242b9751', 'port_name': '50014380242b9750'}, {'device_path': '/sys/devices/pci0000:20/' '0000:20:03.0/0000:21:00.1/' 'host2/fc_host/host2', 'host_device': 'host2', 'node_name': '50014380242b9753', 'port_name': '50014380242b9752'}, ] self.assertListEqual(expected_info, hbas_info) mock_hbas.assert_called_once_with() def test_get_fc_wwpns(self): self._set_get_fc_hbas() wwpns = self.lfc.get_fc_wwpns() expected_wwpns = ['50014380242b9750', '50014380242b9752'] self.assertEqual(expected_wwpns, wwpns) def test_get_fc_wwnns(self): self._set_get_fc_hbas() wwnns = self.lfc.get_fc_wwnns() expected_wwnns = ['50014380242b9751', '50014380242b9753'] self.assertEqual(expected_wwnns, wwnns) class LinuxFCS390XTestCase(LinuxFCTestCase): def setUp(self): super(LinuxFCS390XTestCase, self).setUp() self.cmds = [] self.lfc = linuxfc.LinuxFibreChannelS390X(None, execute=self.fake_execute) @mock.patch.object(linuxfc.LinuxFibreChannel, 'get_fc_hbas') def test_get_fc_hbas_info(self, mock_hbas): host_pci = '/sys/devices/css0/0.0.02ea/0.0.3080/host0/fc_host/host0' mock_hbas.return_value = [{'ClassDevice': 'host0', 'ClassDevicepath': host_pci, 'port_name': '0xc05076ffe680a960', 'node_name': '0x1234567898765432', 'port_state': 'Online'}] hbas_info = self.lfc.get_fc_hbas_info() expected = [{'device_path': '/sys/devices/css0/0.0.02ea/' '0.0.3080/host0/fc_host/host0', 'host_device': 'host0', 'node_name': '1234567898765432', 'port_name': 'c05076ffe680a960'}] self.assertEqual(expected, hbas_info) @mock.patch.object(os.path, 'exists', return_value=False) def test_configure_scsi_device(self, mock_execute): device_number = "0.0.2319" target_wwn = "0x50014380242b9751" lun = 1 self.lfc.configure_scsi_device(device_number, target_wwn, lun) expected_commands = [('tee -a /sys/bus/ccw/drivers/zfcp/0.0.2319/' 'port_rescan'), ('tee -a /sys/bus/ccw/drivers/zfcp/0.0.2319/' '0x50014380242b9751/unit_add')] self.assertEqual(expected_commands, self.cmds) def test_deconfigure_scsi_device(self): device_number = "0.0.2319" target_wwn = "0x50014380242b9751" lun = 1 self.lfc.deconfigure_scsi_device(device_number, target_wwn, lun) expected_commands = [('tee -a /sys/bus/ccw/drivers/zfcp/' '0.0.2319/0x50014380242b9751/unit_remove')] self.assertEqual(expected_commands, self.cmds) os_brick-6.11.0/os_brick/tests/initiator/test_linuxrbd.py # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License.
You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. from unittest import mock from os_brick import exception from os_brick.initiator import linuxrbd from os_brick.tests import base from os_brick import utils class MockRados(object): class Error(Exception): pass class ioctx(object): def __init__(self, *args, **kwargs): pass def __enter__(self, *args, **kwargs): return self def __exit__(self, *args, **kwargs): return False def close(self, *args, **kwargs): pass class Rados(object): def __init__(self, *args, **kwargs): pass def __enter__(self, *args, **kwargs): return self def __exit__(self, *args, **kwargs): return False def connect(self, *args, **kwargs): pass def open_ioctx(self, *args, **kwargs): return MockRados.ioctx() def shutdown(self, *args, **kwargs): pass class RBDClientTestCase(base.TestCase): def setUp(self): super(RBDClientTestCase, self).setUp() @mock.patch('os_brick.initiator.linuxrbd.rbd') @mock.patch('os_brick.initiator.linuxrbd.rados') def test_with_client(self, mock_rados, mock_rbd): with linuxrbd.RBDClient('test_user', 'test_pool') as client: # Verify object attributes are assigned as expected self.assertEqual('/etc/ceph/ceph.conf', client.rbd_conf) self.assertEqual(utils.convert_str('test_user'), client.rbd_user) self.assertEqual(utils.convert_str('test_pool'), client.rbd_pool) # Assert connect is called with correct parameters mock_rados.Rados.assert_called_once_with( clustername='ceph', rados_id=utils.convert_str('test_user'), conffile='/etc/ceph/ceph.conf') # Ensure correct calls to connect to cluster self.assertEqual( 1, mock_rados.Rados.return_value.connect.call_count) mock_rados.Rados.return_value.open_ioctx.assert_called_once_with( utils.convert_str('test_pool')) self.assertEqual(1, mock_rados.Rados.return_value.shutdown.call_count) @mock.patch.object(MockRados.Rados, 'connect', side_effect=MockRados.Error) def test_with_client_error(self, _): linuxrbd.rados = MockRados linuxrbd.rados.Error = MockRados.Error def test(): with linuxrbd.RBDClient('test_user', 'test_pool'): pass self.assertRaises(exception.BrickException, test) class InvalidArgument(Exception): pass class RBDVolumeIOWrapperTestCase(base.TestCase): def setUp(self): super(RBDVolumeIOWrapperTestCase, self).setUp() self.mock_volume = mock.Mock() self.mock_volume_wrapper = \ linuxrbd.RBDVolumeIOWrapper(self.mock_volume) self.data_length = 1024 self.full_data = 'abcd' * 256 self._rbd_lib = self.patch('os_brick.initiator.linuxrbd.rbd') self._rbd_lib.InvalidArgument = InvalidArgument def test_init(self): self.assertEqual(self.mock_volume, self.mock_volume_wrapper._rbd_volume) self.assertEqual(0, self.mock_volume_wrapper._offset) def test_inc_offset(self): self.mock_volume_wrapper._inc_offset(10) self.mock_volume_wrapper._inc_offset(10) self.assertEqual(20, self.mock_volume_wrapper._offset) def test_read(self): def mock_read(offset, length): return self.full_data[offset:length] self.mock_volume.image.read.side_effect = mock_read self.mock_volume.image.size.return_value = self.data_length data = self.mock_volume_wrapper.read() self.assertEqual(self.full_data, data) data = self.mock_volume_wrapper.read() self.assertEqual(b'', data) self.mock_volume_wrapper.seek(0)
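# Editor's note: after seeking back to offset 0, the wrapper is expected to
# return the complete payload again, as the next read() assertions verify.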
data = self.mock_volume_wrapper.read() self.assertEqual(self.full_data, data) self.mock_volume_wrapper.seek(0) data = self.mock_volume_wrapper.read(10) self.assertEqual(self.full_data[:10], data) def test_write(self): self.mock_volume_wrapper.write(self.full_data) self.assertEqual(1024, self.mock_volume_wrapper._offset) def test_seekable(self): self.assertTrue(self.mock_volume_wrapper.seekable) def test_seek(self): self.assertEqual(0, self.mock_volume_wrapper._offset) self.mock_volume_wrapper.seek(10) self.assertEqual(10, self.mock_volume_wrapper._offset) self.mock_volume_wrapper.seek(10) self.assertEqual(10, self.mock_volume_wrapper._offset) self.mock_volume_wrapper.seek(10, 1) self.assertEqual(20, self.mock_volume_wrapper._offset) self.mock_volume_wrapper.seek(0) self.mock_volume_wrapper.write(self.full_data) self.mock_volume.image.size.return_value = self.data_length self.mock_volume_wrapper.seek(0) self.assertEqual(0, self.mock_volume_wrapper._offset) self.mock_volume_wrapper.seek(10, 2) self.assertEqual(self.data_length + 10, self.mock_volume_wrapper._offset) self.mock_volume_wrapper.seek(-10, 2) self.assertEqual(self.data_length - 10, self.mock_volume_wrapper._offset) # test exceptions. self.assertRaises(IOError, self.mock_volume_wrapper.seek, 0, 3) self.assertRaises(IOError, self.mock_volume_wrapper.seek, -1) # offset should not have been changed by any of the previous # operations. self.assertEqual(self.data_length - 10, self.mock_volume_wrapper._offset) def test_tell(self): self.assertEqual(0, self.mock_volume_wrapper.tell()) self.mock_volume_wrapper._inc_offset(10) self.assertEqual(10, self.mock_volume_wrapper.tell()) def test_flush(self): with mock.patch.object(linuxrbd, 'LOG') as mock_logger: self.mock_volume.image.flush = mock.Mock() self.mock_volume_wrapper.flush() self.assertEqual(1, self.mock_volume.image.flush.call_count) self.mock_volume.image.require_not_closed.assert_called_once_with() self.mock_volume.image.flush.reset_mock() self.mock_volume.image.require_not_closed.reset_mock() # this should be caught and logged silently. 
self.mock_volume.image.flush.side_effect = AttributeError self.mock_volume_wrapper.flush() self.assertEqual(1, self.mock_volume.image.flush.call_count) self.assertEqual(1, mock_logger.warning.call_count) self.mock_volume.image.require_not_closed.assert_called_once_with() def test_flush_closed_image(self): """Test when image is closed but wrapper isn't""" with mock.patch.object(linuxrbd, 'LOG') as mock_logger: self.mock_volume.image.require_not_closed.side_effect = \ InvalidArgument self.mock_volume.image.flush = mock.Mock() self.mock_volume_wrapper.flush() self.mock_volume.image.flush.assert_not_called() self.assertEqual(1, mock_logger.warning.call_count) log_msg = mock_logger.warning.call_args[0][0] self.assertTrue( log_msg.startswith("RBDVolumeIOWrapper's underlying image")) self.mock_volume.image.require_not_closed.assert_called_once_with() def test_flush_on_closed(self): self.mock_volume_wrapper.close() self.mock_volume.image.flush.assert_called_once_with() self.assertTrue(self.mock_volume_wrapper.closed) self.mock_volume.image.flush.reset_mock() self.assertRaises(ValueError, self.mock_volume_wrapper.flush) self.mock_volume.image.flush.assert_not_called() self.mock_volume.image.require_not_closed.assert_called_once_with() def test_flush_on_image_closed(self): self.mock_volume.image.require_not_closed.side_effect = InvalidArgument self.mock_volume_wrapper.close() self.mock_volume.image.flush.assert_not_called() self.assertTrue(self.mock_volume_wrapper.closed) self.mock_volume.image.close.assert_called_once_with() self.mock_volume.image.require_not_closed.assert_called_once_with() def test_fileno(self): self.assertRaises(IOError, self.mock_volume_wrapper.fileno) @mock.patch('os_brick.initiator.linuxrbd.rbd') @mock.patch('os_brick.initiator.linuxrbd.rados') @mock.patch.object(linuxrbd.RBDClient, 'disconnect') def test_close(self, rbd_disconnect, mock_rados, mock_rbd): rbd_client = linuxrbd.RBDClient('user', 'pool') rbd_volume = linuxrbd.RBDVolume(rbd_client, 'volume') rbd_handle = linuxrbd.RBDVolumeIOWrapper( linuxrbd.RBDImageMetadata(rbd_volume, 'pool', 'user', None)) with mock.patch.object(rbd_volume, 'closed', False): rbd_handle.close() self.assertEqual(1, rbd_disconnect.call_count) # Confirm the handle now reports that it is closed (this attribute # cannot be modified directly) self.assertTrue(rbd_handle.closed) # New call to close shouldn't create additional calls rbd_handle.close() self.assertEqual(1, rbd_disconnect.call_count) class RBDVolumeTestCase(base.TestCase): def test_name_attribute(self): mock_client = mock.Mock() rbd_volume = linuxrbd.RBDVolume(mock_client, 'volume') self.assertEqual('volume', rbd_volume.name) os_brick-6.11.0/os_brick/tests/initiator/test_linuxscsi.py # (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
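# Editor's illustration (hedged): a minimal sketch of the guarded-flush
# behaviour the RBDVolumeIOWrapper tests above exercise. 'ImageClosed',
# '_FakeImage' and 'GuardedWrapper' are hypothetical stand-ins, not the
# os-brick implementation: flush() probes the image first and downgrades
# a closed-image error to a logged warning instead of propagating it.
import logging

LOG = logging.getLogger(__name__)


class ImageClosed(Exception):
    pass


class _FakeImage(object):
    def __init__(self, closed=False):
        self._closed = closed

    def require_not_closed(self):
        # Raise if the image was already closed, mirroring the probe the
        # wrapper performs before flushing.
        if self._closed:
            raise ImageClosed()

    def flush(self):
        pass


class GuardedWrapper(object):
    def __init__(self, image):
        self._image = image

    def flush(self):
        try:
            self._image.require_not_closed()
        except ImageClosed:
            LOG.warning("Underlying image is closed; skipping flush.")
            return
        self._image.flush()


# Usage: flushing a wrapper over a closed image warns instead of raising.
GuardedWrapper(_FakeImage(closed=True)).flush()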
import os import os.path import textwrap from unittest import mock import ddt from oslo_concurrency import processutils as putils from oslo_log import log as logging from os_brick import exception from os_brick.initiator import linuxscsi from os_brick.tests import base LOG = logging.getLogger(__name__) @ddt.ddt class LinuxSCSITestCase(base.TestCase): def setUp(self): super(LinuxSCSITestCase, self).setUp() self.cmds = [] self.realpath = os.path.realpath self.mock_object(os.path, 'realpath', return_value='/dev/sdc') self.mock_object(os, 'stat', returns=os.stat(__file__)) self.linuxscsi = linuxscsi.LinuxSCSI(None, execute=self.fake_execute) def fake_execute(self, *cmd, **kwargs): self.cmds.append(" ".join(cmd)) return "", None def test_echo_scsi_command(self): self.linuxscsi.echo_scsi_command("/some/path", "1") expected_commands = ['tee -a /some/path'] self.assertEqual(expected_commands, self.cmds) @mock.patch.object(os.path, 'realpath') def test_get_name_from_path(self, realpath_mock): device_name = "/dev/sdc" realpath_mock.return_value = device_name disk_path = ("/dev/disk/by-path/ip-10.10.220.253:3260-" "iscsi-iqn.2000-05.com.3pardata:21810002ac00383d-lun-0") name = self.linuxscsi.get_name_from_path(disk_path) self.assertEqual(device_name, name) disk_path = ("/dev/disk/by-path/pci-0000:00:00.0-ip-10.9.8.7:3260-" "iscsi-iqn.2000-05.com.openstack:2180002ac00383d-lun-0") name = self.linuxscsi.get_name_from_path(disk_path) self.assertEqual(device_name, name) realpath_mock.return_value = "bogus" name = self.linuxscsi.get_name_from_path(disk_path) self.assertIsNone(name) @mock.patch.object(os.path, 'exists', return_value=False) def test_remove_scsi_device(self, exists_mock): self.linuxscsi.remove_scsi_device("/dev/sdc") expected_commands = [] self.assertEqual(expected_commands, self.cmds) exists_mock.return_value = True self.linuxscsi.remove_scsi_device("/dev/sdc") expected_commands = [ ('blockdev --flushbufs /dev/sdc'), ('tee -a /sys/block/sdc/device/delete')] self.assertEqual(expected_commands, self.cmds) @mock.patch.object(linuxscsi.LinuxSCSI, 'echo_scsi_command') @mock.patch.object(linuxscsi.LinuxSCSI, 'flush_device_io') @mock.patch.object(os.path, 'exists', return_value=True) def test_remove_scsi_device_force(self, exists_mock, flush_mock, echo_mock): """With force we'll always call delete even if flush fails.""" exc = exception.ExceptionChainer() flush_mock.side_effect = Exception() echo_mock.side_effect = Exception() device = '/dev/sdc' self.linuxscsi.remove_scsi_device(device, force=True, exc=exc) # The context manager has caught the exceptions self.assertTrue(exc) flush_mock.assert_called_once_with(device) echo_mock.assert_called_once_with('/sys/block/sdc/device/delete', '1') @mock.patch.object(os.path, 'exists', return_value=False) def test_remove_scsi_device_no_flush(self, exists_mock): self.linuxscsi.remove_scsi_device("/dev/sdc") expected_commands = [] self.assertEqual(expected_commands, self.cmds) exists_mock.return_value = True self.linuxscsi.remove_scsi_device("/dev/sdc", flush=False) expected_commands = [('tee -a /sys/block/sdc/device/delete')] self.assertEqual(expected_commands, self.cmds) @mock.patch('os_brick.utils._time_sleep') @mock.patch('os.path.exists', return_value=True) def test_wait_for_volumes_removal_failure(self, exists_mock, sleep_mock): retries = 61 names = ('sda', 'sdb') self.assertRaises(exception.VolumePathNotRemoved, self.linuxscsi.wait_for_volumes_removal, names) exists_mock.assert_has_calls([mock.call('/dev/' + name) for name in names] * retries) 
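# Editor's note: the wait loop sleeps between attempts, not after the final
# failed check, so 61 existence checks imply only 60 sleeps; hence the
# 'retries - 1' assertion below.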
self.assertEqual(retries - 1, sleep_mock.call_count) @mock.patch('os_brick.utils._time_sleep') @mock.patch('os.path.exists', side_effect=(True, True, False, False)) def test_wait_for_volumes_removal_retry(self, exists_mock, sleep_mock): names = ('sda', 'sdb') self.linuxscsi.wait_for_volumes_removal(names) exists_mock.assert_has_calls([mock.call('/dev/' + name) for name in names] * 2) self.assertEqual(1, sleep_mock.call_count) def test_flush_multipath_device(self): dm_map_name = '3600d0230000000000e13955cc3757800' with mock.patch.object(self.linuxscsi, '_execute') as exec_mock: self.linuxscsi.flush_multipath_device(dm_map_name) exec_mock.assert_called_once_with( 'multipath', '-f', dm_map_name, run_as_root=True, attempts=3, timeout=300, interval=10, root_helper=self.linuxscsi._root_helper) def test_get_scsi_wwn(self): fake_path = '/dev/disk/by-id/somepath' fake_wwn = '1234567890' def fake_execute(*cmd, **kwargs): return fake_wwn, None self.linuxscsi._execute = fake_execute wwn = self.linuxscsi.get_scsi_wwn(fake_path) self.assertEqual(fake_wwn, wwn) @mock.patch('builtins.open') def test_get_dm_name(self, open_mock): dm_map_name = '3600d0230000000000e13955cc3757800' cm_open = open_mock.return_value.__enter__.return_value cm_open.read.return_value = dm_map_name res = self.linuxscsi.get_dm_name('dm-0') self.assertEqual(dm_map_name, res) open_mock.assert_called_once_with('/sys/block/dm-0/dm/name') @mock.patch('builtins.open', side_effect=IOError) def test_get_dm_name_failure(self, open_mock): self.assertEqual('', self.linuxscsi.get_dm_name('dm-0')) @mock.patch('glob.glob', side_effect=[[], ['/sys/block/sda/holders/dm-9']]) def test_find_sysfs_multipath_dm(self, glob_mock): device_names = ('sda', 'sdb') res = self.linuxscsi.find_sysfs_multipath_dm(device_names) self.assertEqual('dm-9', res) glob_mock.assert_has_calls([mock.call('/sys/block/sda/holders/dm-*'), mock.call('/sys/block/sdb/holders/dm-*')]) @mock.patch('glob.glob', return_value=[]) def test_find_sysfs_multipath_dm_not_found(self, glob_mock): device_names = ('sda', 'sdb') res = self.linuxscsi.find_sysfs_multipath_dm(device_names) self.assertIsNone(res) glob_mock.assert_has_calls([mock.call('/sys/block/sda/holders/dm-*'), mock.call('/sys/block/sdb/holders/dm-*')]) @mock.patch.object(linuxscsi.LinuxSCSI, '_execute') @mock.patch('os.path.exists', return_value=True) def test_flush_device_io(self, exists_mock, exec_mock): device = '/dev/sda' self.linuxscsi.flush_device_io(device) exists_mock.assert_called_once_with(device) exec_mock.assert_called_once_with( 'blockdev', '--flushbufs', device, run_as_root=True, attempts=3, timeout=300, interval=10, root_helper=self.linuxscsi._root_helper) @mock.patch('os.path.exists', return_value=False) def test_flush_device_io_non_existent(self, exists_mock): device = '/dev/sda' self.linuxscsi.flush_device_io(device) exists_mock.assert_called_once_with(device) @mock.patch.object(os.path, 'exists', return_value=True) def test_find_multipath_device_path(self, exists_mock): fake_wwn = '1234567890' found_path = self.linuxscsi.find_multipath_device_path(fake_wwn) expected_path = '/dev/disk/by-id/dm-uuid-mpath-%s' % fake_wwn self.assertEqual(expected_path, found_path) @mock.patch('os_brick.utils._time_sleep') @mock.patch.object(os.path, 'exists') def test_find_multipath_device_path_mapper(self, exists_mock, sleep_mock): # the wait loop tries 3 times before it gives up # we want to test failing to find the # /dev/disk/by-id/dm-uuid-mpath- path # but finding the # /dev/mapper/ path exists_mock.side_effect = [False, 
False, False, True] fake_wwn = '1234567890' found_path = self.linuxscsi.find_multipath_device_path(fake_wwn) expected_path = '/dev/mapper/%s' % fake_wwn self.assertEqual(expected_path, found_path) self.assertTrue(sleep_mock.called) @mock.patch.object(os.path, 'exists', return_value=False) @mock.patch('os_brick.utils._time_sleep') def test_find_multipath_device_path_fail(self, exists_mock, sleep_mock): fake_wwn = '1234567890' found_path = self.linuxscsi.find_multipath_device_path(fake_wwn) self.assertIsNone(found_path) @mock.patch.object(os.path, 'exists', return_value=False) @mock.patch('os_brick.utils._time_sleep') def test_wait_for_path_not_found(self, exists_mock, sleep_mock): path = "/dev/disk/by-id/dm-uuid-mpath-%s" % '1234567890' self.assertRaisesRegex(exception.VolumeDeviceNotFound, r'Volume device not found at %s' % path, self.linuxscsi.wait_for_path, path) @ddt.data({'do_raise': False, 'force': False}, {'do_raise': True, 'force': True}) @ddt.unpack @mock.patch.object(linuxscsi.LinuxSCSI, '_remove_scsi_symlinks') @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_del_path') @mock.patch.object(linuxscsi.LinuxSCSI, 'is_multipath_running', return_value=True) @mock.patch.object(linuxscsi.LinuxSCSI, 'flush_multipath_device') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_dm_name') @mock.patch.object(linuxscsi.LinuxSCSI, 'find_sysfs_multipath_dm') @mock.patch.object(linuxscsi.LinuxSCSI, 'wait_for_volumes_removal') @mock.patch.object(linuxscsi.LinuxSCSI, 'remove_scsi_device') def test_remove_connection_multipath_complete(self, remove_mock, wait_mock, find_dm_mock, get_dm_name_mock, flush_mp_mock, is_mp_running_mock, mp_del_path_mock, remove_link_mock, do_raise, force): if do_raise: flush_mp_mock.side_effect = Exception devices_names = ('sda', 'sdb') exc = exception.ExceptionChainer() mp_name = self.linuxscsi.remove_connection(devices_names, force=mock.sentinel.Force, exc=exc) find_dm_mock.assert_called_once_with(devices_names) get_dm_name_mock.assert_called_once_with(find_dm_mock.return_value) flush_mp_mock.assert_called_once_with(get_dm_name_mock.return_value) self.assertEqual(get_dm_name_mock.return_value if do_raise else None, mp_name) is_mp_running_mock.assert_not_called() mp_del_path_mock.assert_has_calls([ mock.call('/dev/sda'), mock.call('/dev/sdb')]) remove_mock.assert_has_calls([ mock.call('/dev/sda', mock.sentinel.Force, exc, False), mock.call('/dev/sdb', mock.sentinel.Force, exc, False)]) wait_mock.assert_called_once_with(devices_names) self.assertEqual(do_raise, bool(exc)) remove_link_mock.assert_called_once_with(devices_names) @mock.patch.object(linuxscsi.LinuxSCSI, '_remove_scsi_symlinks') @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_del_path') @mock.patch.object(linuxscsi.LinuxSCSI, 'is_multipath_running', return_value=True) @mock.patch.object(linuxscsi.LinuxSCSI, 'flush_multipath_device') @mock.patch.object(linuxscsi.LinuxSCSI, 'get_dm_name') @mock.patch.object(linuxscsi.LinuxSCSI, 'find_sysfs_multipath_dm', return_value=None) @mock.patch.object(linuxscsi.LinuxSCSI, 'wait_for_volumes_removal') @mock.patch.object(linuxscsi.LinuxSCSI, 'remove_scsi_device') def test_remove_connection_multipath_complete_no_dm(self, remove_mock, wait_mock, find_dm_mock, get_dm_name_mock, flush_mp_mock, is_mp_running_mock, mp_del_path_mock, remove_link_mock): devices_names = ('sda', 'sdb') exc = exception.ExceptionChainer() mp_name = self.linuxscsi.remove_connection(devices_names, force=mock.sentinel.Force, exc=exc) find_dm_mock.assert_called_once_with(devices_names) 
get_dm_name_mock.assert_not_called() flush_mp_mock.assert_not_called() self.assertIsNone(mp_name) is_mp_running_mock.assert_called_once() mp_del_path_mock.assert_has_calls([ mock.call('/dev/sda'), mock.call('/dev/sdb')]) remove_mock.assert_has_calls([ mock.call('/dev/sda', mock.sentinel.Force, exc, False), mock.call('/dev/sdb', mock.sentinel.Force, exc, False)]) wait_mock.assert_called_once_with(devices_names) self.assertFalse(bool(exc)) remove_link_mock.assert_called_once_with(devices_names) @mock.patch.object(linuxscsi.LinuxSCSI, '_remove_scsi_symlinks') @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_del_path') @mock.patch.object(linuxscsi.LinuxSCSI, 'is_multipath_running', return_value=True) @mock.patch.object(linuxscsi.LinuxSCSI, 'flush_multipath_device', side_effect=Exception) @mock.patch.object(linuxscsi.LinuxSCSI, 'get_dm_name') @mock.patch.object(linuxscsi.LinuxSCSI, 'find_sysfs_multipath_dm') @mock.patch.object(linuxscsi.LinuxSCSI, 'wait_for_volumes_removal') @mock.patch.object(linuxscsi.LinuxSCSI, 'remove_scsi_device') def test_remove_connection_multipath_fail(self, remove_mock, wait_mock, find_dm_mock, get_dm_name_mock, flush_mp_mock, is_mp_running_mock, mp_del_path_mock, remove_link_mock): flush_mp_mock.side_effect = exception.ExceptionChainer devices_names = ('sda', 'sdb') exc = exception.ExceptionChainer() self.assertRaises(exception.ExceptionChainer, self.linuxscsi.remove_connection, devices_names, force=False, exc=exc) find_dm_mock.assert_called_once_with(devices_names) get_dm_name_mock.assert_called_once_with(find_dm_mock.return_value) flush_mp_mock.assert_called_once_with(get_dm_name_mock.return_value) is_mp_running_mock.assert_not_called() mp_del_path_mock.assert_not_called() remove_mock.assert_not_called() wait_mock.assert_not_called() remove_link_mock.assert_not_called() self.assertTrue(bool(exc)) @mock.patch.object(linuxscsi.LinuxSCSI, '_remove_scsi_symlinks') @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_del_path') @mock.patch.object(linuxscsi.LinuxSCSI, 'is_multipath_running', return_value=True) @mock.patch.object(linuxscsi.LinuxSCSI, 'find_sysfs_multipath_dm') @mock.patch.object(linuxscsi.LinuxSCSI, 'wait_for_volumes_removal') @mock.patch.object(linuxscsi.LinuxSCSI, 'remove_scsi_device') def test_remove_connection_singlepath_no_path(self, remove_mock, wait_mock, find_dm_mock, is_mp_running_mock, mp_del_path_mock, remove_link_mock): # Test remove connection when we didn't form a multipath and didn't # even use any of the devices that were found. This means that we # don't flush any of the single paths when removing them. 
        find_dm_mock.return_value = None
        devices_names = ('sda', 'sdb')
        exc = exception.ExceptionChainer()
        self.linuxscsi.remove_connection(devices_names,
                                         force=mock.sentinel.Force,
                                         exc=exc)
        find_dm_mock.assert_called_once_with(devices_names)
        is_mp_running_mock.assert_called_once()
        mp_del_path_mock.assert_has_calls([
            mock.call('/dev/sda'), mock.call('/dev/sdb')])
        remove_mock.assert_has_calls(
            [mock.call('/dev/sda', mock.sentinel.Force, exc, False),
             mock.call('/dev/sdb', mock.sentinel.Force, exc, False)])
        wait_mock.assert_called_once_with(devices_names)
        remove_link_mock.assert_called_once_with(devices_names)

    @mock.patch.object(linuxscsi.LinuxSCSI, '_remove_scsi_symlinks')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'multipath_del_path')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'is_multipath_running',
                       return_value=False)
    @mock.patch.object(linuxscsi.LinuxSCSI, 'find_sysfs_multipath_dm')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'wait_for_volumes_removal')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'remove_scsi_device')
    def test_remove_connection_singlepath_used(self, remove_mock, wait_mock,
                                               find_dm_mock,
                                               is_mp_running_mock,
                                               mp_del_path_mock,
                                               remove_link_mock):
        # Test remove connection when we didn't form a multipath and just used
        # one of the single paths that were found. This means that we don't
        # flush any of the single paths when removing them.
        find_dm_mock.return_value = None
        devices_names = ('sda', 'sdb')
        exc = exception.ExceptionChainer()
        # realpath was mocked on test setup
        with mock.patch('os.path.realpath', side_effect=self.realpath):
            self.linuxscsi.remove_connection(devices_names,
                                             force=mock.sentinel.Force,
                                             exc=exc,
                                             path_used='/dev/sdb',
                                             was_multipath=False)
        find_dm_mock.assert_called_once_with(devices_names)
        is_mp_running_mock.assert_called_once()
        mp_del_path_mock.assert_not_called()
        remove_mock.assert_has_calls(
            [mock.call('/dev/sda', mock.sentinel.Force, exc, False),
             mock.call('/dev/sdb', mock.sentinel.Force, exc, True)])
        wait_mock.assert_called_once_with(devices_names)
        remove_link_mock.assert_called_once_with(devices_names)

    def test_find_multipath_device_3par_ufn(self):
        def fake_execute(*cmd, **kwargs):
            out = ("mpath6 (350002ac20398383d) dm-3 3PARdata,VV\n"
                   "size=2.0G features='0' hwhandler='0' wp=rw\n"
                   "`-+- policy='round-robin 0' prio=-1 status=active\n"
                   " |- 0:0:0:1 sde 8:64 active undef running\n"
                   " `- 2:0:0:1 sdf 8:80 active undef running\n"
                   )
            return out, None

        self.linuxscsi._execute = fake_execute

        info = self.linuxscsi.find_multipath_device('/dev/sde')

        self.assertEqual("350002ac20398383d", info["id"])
        self.assertEqual("mpath6", info["name"])
        self.assertEqual("/dev/mapper/mpath6", info["device"])

        self.assertEqual("/dev/sde", info['devices'][0]['device'])
        self.assertEqual("0", info['devices'][0]['host'])
        self.assertEqual("0", info['devices'][0]['id'])
        self.assertEqual("0", info['devices'][0]['channel'])
        self.assertEqual("1", info['devices'][0]['lun'])

        self.assertEqual("/dev/sdf", info['devices'][1]['device'])
        self.assertEqual("2", info['devices'][1]['host'])
        self.assertEqual("0", info['devices'][1]['id'])
        self.assertEqual("0", info['devices'][1]['channel'])
        self.assertEqual("1", info['devices'][1]['lun'])

    def test_find_multipath_device_svc(self):
        def fake_execute(*cmd, **kwargs):
            out = ("36005076da00638089c000000000004d5 dm-2 IBM,2145\n"
                   "size=954M features='1 queue_if_no_path' hwhandler='0'"
                   " wp=rw\n"
                   "|-+- policy='round-robin 0' prio=-1 status=active\n"
                   "| |- 6:0:2:0 sde 8:64 active undef running\n"
                   "| `- 6:0:4:0 sdg 8:96 active undef running\n"
                   "`-+- policy='round-robin 0' prio=-1 status=enabled\n"
                   " |- 6:0:3:0 sdf 8:80 active undef running\n"
                   " `- 6:0:5:0 sdh 8:112 active undef running\n"
                   )
            return out, None

        self.linuxscsi._execute = fake_execute

        info = self.linuxscsi.find_multipath_device('/dev/sde')

        self.assertEqual("36005076da00638089c000000000004d5", info["id"])
        self.assertEqual("36005076da00638089c000000000004d5", info["name"])
        self.assertEqual("/dev/mapper/36005076da00638089c000000000004d5",
                         info["device"])

        self.assertEqual("/dev/sde", info['devices'][0]['device'])
        self.assertEqual("6", info['devices'][0]['host'])
        self.assertEqual("0", info['devices'][0]['channel'])
        self.assertEqual("2", info['devices'][0]['id'])
        self.assertEqual("0", info['devices'][0]['lun'])

        self.assertEqual("/dev/sdf", info['devices'][2]['device'])
        self.assertEqual("6", info['devices'][2]['host'])
        self.assertEqual("0", info['devices'][2]['channel'])
        self.assertEqual("3", info['devices'][2]['id'])
        self.assertEqual("0", info['devices'][2]['lun'])

    def test_find_multipath_device_ds8000(self):
        def fake_execute(*cmd, **kwargs):
            out = ("36005076303ffc48e0000000000000101 dm-2 IBM,2107900\n"
                   "size=1.0G features='1 queue_if_no_path' hwhandler='0'"
                   " wp=rw\n"
                   "`-+- policy='round-robin 0' prio=-1 status=active\n"
                   " |- 6:0:2:0 sdd 8:64 active undef running\n"
                   " `- 6:1:0:3 sdc 8:32 active undef running\n"
                   )
            return out, None

        self.linuxscsi._execute = fake_execute

        info = self.linuxscsi.find_multipath_device('/dev/sdd')

        self.assertEqual("36005076303ffc48e0000000000000101", info["id"])
        self.assertEqual("36005076303ffc48e0000000000000101", info["name"])
        self.assertEqual("/dev/mapper/36005076303ffc48e0000000000000101",
                         info["device"])

        self.assertEqual("/dev/sdd", info['devices'][0]['device'])
        self.assertEqual("6", info['devices'][0]['host'])
        self.assertEqual("0", info['devices'][0]['channel'])
        self.assertEqual("2", info['devices'][0]['id'])
        self.assertEqual("0", info['devices'][0]['lun'])

        self.assertEqual("/dev/sdc", info['devices'][1]['device'])
        self.assertEqual("6", info['devices'][1]['host'])
        self.assertEqual("1", info['devices'][1]['channel'])
        self.assertEqual("0", info['devices'][1]['id'])
        self.assertEqual("3", info['devices'][1]['lun'])

    def test_find_multipath_device_with_error(self):
        def fake_execute(*cmd, **kwargs):
            out = ("Oct 13 10:24:01 | /lib/udev/scsi_id exited with 1\n"
                   "36005076303ffc48e0000000000000101 dm-2 IBM,2107900\n"
                   "size=1.0G features='1 queue_if_no_path' hwhandler='0'"
                   " wp=rw\n"
                   "`-+- policy='round-robin 0' prio=-1 status=active\n"
                   " |- 6:0:2:0 sdd 8:64 active undef running\n"
                   " `- 6:1:0:3 sdc 8:32 active undef running\n"
                   )
            return out, None

        self.linuxscsi._execute = fake_execute

        info = self.linuxscsi.find_multipath_device('/dev/sdd')

        self.assertEqual("36005076303ffc48e0000000000000101", info["id"])
        self.assertEqual("36005076303ffc48e0000000000000101", info["name"])
        self.assertEqual("/dev/mapper/36005076303ffc48e0000000000000101",
                         info["device"])

        self.assertEqual("/dev/sdd", info['devices'][0]['device'])
        self.assertEqual("6", info['devices'][0]['host'])
        self.assertEqual("0", info['devices'][0]['channel'])
        self.assertEqual("2", info['devices'][0]['id'])
        self.assertEqual("0", info['devices'][0]['lun'])

        self.assertEqual("/dev/sdc", info['devices'][1]['device'])
        self.assertEqual("6", info['devices'][1]['host'])
        self.assertEqual("1", info['devices'][1]['channel'])
        self.assertEqual("0", info['devices'][1]['id'])
        self.assertEqual("3", info['devices'][1]['lun'])

    def test_find_multipath_device_with_multiple_errors(self):
        def fake_execute(*cmd, **kwargs):
            out = ("Jun 21 04:39:26 | 8:160: path wwid appears to have "
                   "changed. Using old wwid.\n\n"
                   "Jun 21 04:39:26 | 65:208: path wwid appears to have "
                   "changed. Using old wwid.\n\n"
                   "Jun 21 04:39:26 | 65:208: path wwid appears to have "
                   "changed. Using old wwid.\n"
                   "3624a93707edcfde1127040370004ee62 dm-84 PURE ,"
                   "FlashArray\n"
                   "size=100G features='0' hwhandler='0' wp=rw\n"
                   "`-+- policy='queue-length 0' prio=1 status=active\n"
                   " |- 8:0:0:9 sdaa 65:160 active ready running\n"
                   " `- 8:0:1:9 sdac 65:192 active ready running\n"
                   )
            return out, None

        self.linuxscsi._execute = fake_execute

        info = self.linuxscsi.find_multipath_device('/dev/sdaa')

        self.assertEqual("3624a93707edcfde1127040370004ee62", info["id"])
        self.assertEqual("3624a93707edcfde1127040370004ee62", info["name"])
        self.assertEqual("/dev/mapper/3624a93707edcfde1127040370004ee62",
                         info["device"])

        self.assertEqual("/dev/sdaa", info['devices'][0]['device'])
        self.assertEqual("8", info['devices'][0]['host'])
        self.assertEqual("0", info['devices'][0]['channel'])
        self.assertEqual("0", info['devices'][0]['id'])
        self.assertEqual("9", info['devices'][0]['lun'])

        self.assertEqual("/dev/sdac", info['devices'][1]['device'])
        self.assertEqual("8", info['devices'][1]['host'])
        self.assertEqual("0", info['devices'][1]['channel'])
        self.assertEqual("1", info['devices'][1]['id'])
        self.assertEqual("9", info['devices'][1]['lun'])

    @mock.patch('os_brick.utils._time_sleep')
    def test_wait_for_rw(self, mock_sleep):
        lsblk_output = """3624a93709a738ed78583fd1200143029 (dm-2) 0
sdb 0
3624a93709a738ed78583fd120014724e (dm-1) 0
sdc 0
3624a93709a738ed78583fd120014a2bb (dm-0) 0
sdd 0
3624a93709a738ed78583fd1200143029 (dm-2) 0
sde 0
3624a93709a738ed78583fd120014724e (dm-1) 0
sdf 0
3624a93709a738ed78583fd120014a2bb (dm-0) 0
sdg 0
3624a93709a738ed78583fd1200143029 (dm-2) 0
sdh 0
3624a93709a738ed78583fd120014724e (dm-1) 0
sdi 0
3624a93709a738ed78583fd120014a2bb (dm-0) 0
sdj 0
3624a93709a738ed78583fd1200143029 (dm-2) 0
sdk 0
3624a93709a738ed78583fd120014724e (dm-1) 0
sdl 0
3624a93709a738ed78583fd120014a2bb (dm-0) 0
sdm 0
vda1 0
vdb 0
vdb1 0
loop0 0"""
        mock_execute = mock.Mock()
        mock_execute.return_value = (lsblk_output, None)
        self.linuxscsi._execute = mock_execute

        wwn = '3624a93709a738ed78583fd120014a2bb'
        path = '/dev/disk/by-id/dm-uuid-mpath-' + wwn
        # Ensure no exception is raised and no sleep is called
        self.linuxscsi.wait_for_rw(wwn, path)
        self.assertFalse(mock_sleep.called)

    @mock.patch('os_brick.utils._time_sleep')
    def test_wait_for_rw_needs_retry(self, mock_sleep):
        lsblk_ro_output = """3624a93709a738ed78583fd1200143029 (dm-2) 0
sdb 0
3624a93709a738ed78583fd120014724e (dm-1) 0
sdc 0
3624a93709a738ed78583fd120014a2bb (dm-0) 0
sdd 0
3624a93709a738ed78583fd1200143029 (dm-2) 1
sde 0
3624a93709a738ed78583fd120014724e (dm-1) 0
sdf 0
3624a93709a738ed78583fd120014a2bb (dm-0) 0
sdg 0
3624a93709a738ed78583fd1200143029 (dm-2) 1
sdh 0
3624a93709a738ed78583fd120014724e (dm-1) 0
sdi 0
3624a93709a738ed78583fd120014a2bb (dm-0) 0
sdj 0
3624a93709a738ed78583fd1200143029 (dm-2) 1
sdk 0
3624a93709a738ed78583fd120014724e (dm-1) 0
sdl 0
3624a93709a738ed78583fd120014a2bb (dm-0) 0
sdm 0
vda1 0
vdb 0
vdb1 0
loop0 0"""
        lsblk_rw_output = """3624a93709a738ed78583fd1200143029 (dm-2) 0
sdb 0
3624a93709a738ed78583fd120014724e (dm-1) 0
sdc 0
3624a93709a738ed78583fd120014a2bb (dm-0) 0
sdd 0
3624a93709a738ed78583fd1200143029 (dm-2) 0
sde 0
3624a93709a738ed78583fd120014724e (dm-1) 0
sdf 0
3624a93709a738ed78583fd120014a2bb (dm-0) 0
sdg 0
3624a93709a738ed78583fd1200143029 (dm-2) 0
sdh 0
3624a93709a738ed78583fd120014724e (dm-1) 0
sdi 0
3624a93709a738ed78583fd120014a2bb (dm-0) 0
sdj 0
3624a93709a738ed78583fd1200143029 (dm-2) 0
sdk 0
3624a93709a738ed78583fd120014724e (dm-1) 0
sdl 0
3624a93709a738ed78583fd120014a2bb (dm-0) 0
sdm 0
vda1 0
vdb 0
vdb1 0
loop0 0"""
        mock_execute = mock.Mock()
        mock_execute.side_effect = [(lsblk_ro_output, None),
                                    ('', None),  # multipath -r output
                                    (lsblk_rw_output, None)]
        self.linuxscsi._execute = mock_execute

        wwn = '3624a93709a738ed78583fd1200143029'
        path = '/dev/disk/by-id/dm-uuid-mpath-' + wwn
        self.linuxscsi.wait_for_rw(wwn, path)
        self.assertEqual(1, mock_sleep.call_count)

    @mock.patch('os_brick.utils._time_sleep')
    def test_wait_for_rw_always_readonly(self, mock_sleep):
        lsblk_output = """3624a93709a738ed78583fd1200143029 (dm-2) 0
sdb 0
3624a93709a738ed78583fd120014724e (dm-1) 0
sdc 0
3624a93709a738ed78583fd120014a2bb (dm-0) 1
sdd 0
3624a93709a738ed78583fd1200143029 (dm-2) 0
sde 0
3624a93709a738ed78583fd120014724e (dm-1) 0
sdf 0
3624a93709a738ed78583fd120014a2bb (dm-0) 1
sdg 0
3624a93709a738ed78583fd1200143029 (dm-2) 0
sdh 0
3624a93709a738ed78583fd120014724e (dm-1) 0
sdi 0
3624a93709a738ed78583fd120014a2bb (dm-0) 1
sdj 0
3624a93709a738ed78583fd1200143029 (dm-2) 0
sdk 0
3624a93709a738ed78583fd120014724e (dm-1) 0
sdl 0
3624a93709a738ed78583fd120014a2bb (dm-0) 1
sdm 0
vda1 0
vdb 0
vdb1 0
loop0 0"""
        mock_execute = mock.Mock()
        mock_execute.return_value = (lsblk_output, None)
        self.linuxscsi._execute = mock_execute

        wwn = '3624a93709a738ed78583fd120014a2bb'
        path = '/dev/disk/by-id/dm-uuid-mpath-' + wwn
        self.assertRaises(exception.BlockDeviceReadOnly,
                          self.linuxscsi.wait_for_rw,
                          wwn,
                          path)
        self.assertEqual(4, mock_sleep.call_count)

    def test_find_multipath_device_with_action(self):
        def fake_execute(*cmd, **kwargs):
            out = textwrap.dedent("""
                create: 36005076303ffc48e0000000000000101 dm-2 IBM,2107900
                size=1.0G features='1 queue_if_no_path' hwhandler='0' wp=rw
                `-+- policy='round-robin 0' prio=-1 status=active
                 |- 6:0:2:0 sdd 8:64 active undef running
                 `- 6:1:0:3 sdc 8:32 active undef running
                """)
            return out, None

        self.linuxscsi._execute = fake_execute

        info = self.linuxscsi.find_multipath_device('/dev/sdd')
        LOG.error("Device info: %s", info)

        self.assertEqual('36005076303ffc48e0000000000000101', info['id'])
        self.assertEqual('36005076303ffc48e0000000000000101', info['name'])
        self.assertEqual('/dev/mapper/36005076303ffc48e0000000000000101',
                         info['device'])

        self.assertEqual("/dev/sdd", info['devices'][0]['device'])
        self.assertEqual("6", info['devices'][0]['host'])
        self.assertEqual("0", info['devices'][0]['channel'])
        self.assertEqual("2", info['devices'][0]['id'])
        self.assertEqual("0", info['devices'][0]['lun'])

        self.assertEqual("/dev/sdc", info['devices'][1]['device'])
        self.assertEqual("6", info['devices'][1]['host'])
        self.assertEqual("1", info['devices'][1]['channel'])
        self.assertEqual("0", info['devices'][1]['id'])
        self.assertEqual("3", info['devices'][1]['lun'])

    def test_multipath_reconfigure(self):
        self.linuxscsi.multipath_reconfigure()
        expected_commands = ['multipathd reconfigure']
        self.assertEqual(expected_commands, self.cmds)

    def test_multipath_resize_map(self):
        dm_path = '/dev/dm-5'
        self.linuxscsi.multipath_resize_map(dm_path)
        expected_commands = ['multipathd resize map %s' % dm_path]
        self.assertEqual(expected_commands, self.cmds)

    @mock.patch('os_brick.utils.check_valid_device')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device_path')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_scsi_wwn')
    @mock.patch('os_brick.utils.get_device_size')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_device_info')
    def test_extend_volume_no_mpath(self, mock_device_info,
                                    mock_device_size,
                                    mock_scsi_wwn,
                                    mock_find_mpath_path,
                                    mock_valid_dev):
        """Test extending a volume where there is no multipath device."""
        fake_device = {'host': '0',
                       'channel': '0',
                       'id': '0',
                       'lun': '1'}
        mock_device_info.return_value = fake_device

        first_size = 1024
        second_size = 2048
        mock_device_size.side_effect = [first_size, second_size]
        wwn = '1234567890123456'
        mock_scsi_wwn.return_value = wwn
        mock_find_mpath_path.return_value = None
        mock_valid_dev.return_value = True

        ret_size = self.linuxscsi.extend_volume(['/dev/fake'])
        self.assertEqual(second_size, ret_size)

        # because we don't mock out the echo_scsi_command
        expected_cmds = ['tee -a /sys/bus/scsi/drivers/sd/0:0:0:1/rescan']
        self.assertEqual(expected_cmds, self.cmds)

    @mock.patch('os_brick.utils.check_valid_device')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device_path')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_scsi_wwn')
    @mock.patch('os_brick.utils.get_device_size')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_device_info')
    @mock.patch('os.path.realpath')
    def test_extend_volume_with_mpath(self, mock_realpath,
                                      mock_device_info,
                                      mock_device_size,
                                      mock_scsi_wwn,
                                      mock_find_mpath_path,
                                      mock_valid_dev):
        """Test extending a volume where there is a multipath device."""
        mock_device_info.side_effect = [{'host': host,
                                         'channel': '0',
                                         'id': '0',
                                         'lun': '1'} for host in ['0', '1']]
        mock_device_size.side_effect = [1024, 2048, 1024, 2048, 1024, 2048]
        wwn = '1234567890123456'
        mock_scsi_wwn.return_value = wwn
        mpath_path = ('/dev/mapper/dm-uuid-mpath-%s' % wwn)
        mock_find_mpath_path.return_value = mpath_path
        dm_path = '/dev/dm-5'
        mock_realpath.return_value = dm_path
        mock_valid_dev.return_value = True

        ret_size = self.linuxscsi.extend_volume(['/dev/fake1', '/dev/fake2'],
                                                use_multipath=True)
        self.assertEqual(2048, ret_size)

        # because we don't mock out the echo_scsi_command
        expected_cmds = ['tee -a /sys/bus/scsi/drivers/sd/0:0:0:1/rescan',
                         'tee -a /sys/bus/scsi/drivers/sd/1:0:0:1/rescan',
                         'multipathd reconfigure',
                         'multipathd resize map %s' % dm_path]
        self.assertEqual(expected_cmds, self.cmds)
        mock_realpath.assert_called_once_with(mpath_path)

    @mock.patch('os_brick.utils.check_valid_device')
    @mock.patch.object(linuxscsi.LinuxSCSI, '_multipath_resize_map')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device_path')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_scsi_wwn')
    @mock.patch('os_brick.utils.get_device_size')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_device_info')
    def test_extend_volume_with_mpath_fail(self, mock_device_info,
                                           mock_device_size,
                                           mock_scsi_wwn,
                                           mock_find_mpath_path,
                                           mock_mpath_resize_map,
                                           mock_valid_dev):
        """Test extending a volume where resizing the multipath map fails."""
        mock_device_info.side_effect = [{'host': host,
                                         'channel': '0',
                                         'id': '0',
                                         'lun': '1'} for host in ['0', '1']]
        mock_device_size.side_effect = [1024, 2048, 1024, 2048, 1024, 2048]
        wwn = '1234567890123456'
        mock_scsi_wwn.return_value = wwn
        mock_find_mpath_path.return_value = ('/dev/mapper/dm-uuid-mpath-%s'
                                             % wwn)
        mock_mpath_resize_map.side_effect = putils.ProcessExecutionError(
            stdout="fail")
        mock_valid_dev.return_value = True

        self.assertRaises(
            putils.ProcessExecutionError,
            self.linuxscsi.extend_volume,
            volume_paths=['/dev/fake1', '/dev/fake2'],
            use_multipath=True)

        # because we don't mock out the echo_scsi_command
        expected_cmds = ['tee -a /sys/bus/scsi/drivers/sd/0:0:0:1/rescan',
                         'tee -a /sys/bus/scsi/drivers/sd/1:0:0:1/rescan',
                         'multipathd reconfigure']
        self.assertEqual(expected_cmds, self.cmds)

    @mock.patch('os_brick.utils.check_valid_device')
    @mock.patch('time.sleep')
    @mock.patch.object(linuxscsi.LinuxSCSI, '_multipath_resize_map')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device_path')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_scsi_wwn')
    @mock.patch('os_brick.utils.get_device_size')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_device_info')
    @mock.patch('os.path.realpath')
    def test_extend_volume_with_mpath_pending(self, mock_realpath,
                                              mock_device_info,
                                              mock_device_size,
                                              mock_scsi_wwn,
                                              mock_find_mpath_path,
                                              mock_mpath_resize_map,
                                              mock_sleep,
                                              mock_valid_dev):
        """Test extending a volume when the multipath resize map call

        first times out and then succeeds on retry.
        """
        mock_device_info.side_effect = [{'host': host,
                                         'channel': '0',
                                         'id': '0',
                                         'lun': '1'} for host in ['0', '1']]
        mock_device_size.side_effect = [1024, 2048, 1024, 2048, 1024, 2048]
        wwn = '1234567890123456'
        mock_scsi_wwn.return_value = wwn
        mpath_path = ('/dev/mapper/dm-uuid-mpath-%s' % wwn)
        mock_find_mpath_path.return_value = mpath_path
        dm_path = '/dev/dm-5'
        mock_realpath.return_value = dm_path
        mock_mpath_resize_map.side_effect = (
            putils.ProcessExecutionError(stdout="timeout"), "success")
        mock_valid_dev.return_value = True

        ret_size = self.linuxscsi.extend_volume(['/dev/fake1', '/dev/fake2'],
                                                use_multipath=True)
        self.assertEqual(2048, ret_size)

        # because we don't mock out the echo_scsi_command
        expected_cmds = ['tee -a /sys/bus/scsi/drivers/sd/0:0:0:1/rescan',
                         'tee -a /sys/bus/scsi/drivers/sd/1:0:0:1/rescan',
                         'multipathd reconfigure']
        self.assertEqual(expected_cmds, self.cmds)
        mock_mpath_resize_map.assert_has_calls([mock.call(dm_path)] * 2)
        mock_realpath.assert_called_once_with(mpath_path)

    @mock.patch('os_brick.utils.check_valid_device')
    @mock.patch('time.sleep')
    @mock.patch('time.time')
    @mock.patch.object(linuxscsi.LinuxSCSI, '_multipath_resize_map')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'find_multipath_device_path')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_scsi_wwn')
    @mock.patch('os_brick.utils.get_device_size')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_device_info')
    def test_extend_volume_with_mpath_timeout(self, mock_device_info,
                                              mock_device_size,
                                              mock_scsi_wwn,
                                              mock_find_mpath_path,
                                              mock_mpath_resize_map,
                                              mock_currtime,
                                              mock_sleep,
                                              mock_valid_dev):
        """Test extending a volume when the resize map call keeps failing."""
        mock_device_info.side_effect = [{'host': host,
                                         'channel': '0',
                                         'id': '0',
                                         'lun': '1'} for host in ['0', '1']]
        mock_device_size.side_effect = [1024, 2048, 1024, 2048, 1024, 2048]
        wwn = '1234567890123456'
        mock_scsi_wwn.return_value = wwn
        mock_find_mpath_path.return_value = ('/dev/mapper/dm-uuid-mpath-%s'
                                             % wwn)
        mock_valid_dev.return_value = True

        # time.time is used to check if our own timeout has been exceeded,
        # which is why it has to be mocked.
        fake_time = 0

        def get_fake_time():
            nonlocal fake_time
            fake_time += 10
            return fake_time

        mock_currtime.side_effect = get_fake_time

        # We're testing the scenario in which the multipath resize map
        # call times out indefinitely.
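        # Editor's note: each time.time() call above advances the fake clock
        # by 10 seconds, so with every resize attempt failing, the internal
        # deadline in extend_volume is eventually exceeded and the last
        # ProcessExecutionError propagates to the caller.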
        mock_mpath_resize_map.side_effect = putils.ProcessExecutionError(
            stdout="timeout")

        self.assertRaises(
            putils.ProcessExecutionError,
            self.linuxscsi.extend_volume,
            ['/dev/fake1', '/dev/fake2'],
            use_multipath=True)

    @mock.patch('os_brick.utils.check_valid_device')
    def test_extend_volume_with_mpath_path_down(self, mock_valid_dev):
        """Test extending a volume where there is a path down."""
        mock_valid_dev.return_value = False
        dev1 = '/dev/fake1'
        dev2 = '/dev/fake2'
        self.assertRaises(
            exception.BrickException,
            self.linuxscsi.extend_volume,
            [dev1, dev2],
            use_multipath=True)
        mock_valid_dev.assert_called_once_with(self.linuxscsi, dev1)

    def test_process_lun_id_list(self):
        lun_list = [2, 255, 88, 370, 5, 256]
        result = self.linuxscsi.process_lun_id(lun_list)
        expected = [2, 255, 88, '0x0172000000000000', 5,
                    '0x0100000000000000']
        self.assertEqual(expected, result)

    def test_process_lun_id_single_val_make_hex(self):
        lun_id = 499
        result = self.linuxscsi.process_lun_id(lun_id)
        expected = '0x01f3000000000000'
        self.assertEqual(expected, result)

    def test_process_lun_id_single_val_make_hex_border_case(self):
        lun_id = 256
        result = self.linuxscsi.process_lun_id(lun_id)
        expected = '0x0100000000000000'
        self.assertEqual(expected, result)

    def test_process_lun_id_single_var_return(self):
        lun_id = 13
        result = self.linuxscsi.process_lun_id(lun_id)
        expected = 13
        self.assertEqual(expected, result)

    @mock.patch('os_brick.privileged.rootwrap.execute', return_value=('', ''))
    def test_is_multipath_running(self, mock_exec):
        res = linuxscsi.LinuxSCSI.is_multipath_running(None, mock_exec)
        self.assertTrue(res)
        mock_exec.assert_called_once_with(
            'multipathd', 'show', 'status', run_as_root=True,
            root_helper=None)

    @mock.patch.object(linuxscsi, 'LOG')
    @mock.patch('os_brick.privileged.rootwrap.execute')
    def test_is_multipath_running_failure_exit_code_0(
            self, mock_exec, mock_log):
        mock_exec.return_value = ('error receiving packet', '')
        res = linuxscsi.LinuxSCSI.is_multipath_running(None, mock_exec)
        mock_exec.assert_called_once_with(
            'multipathd', 'show', 'status', run_as_root=True,
            root_helper=None)
        self.assertFalse(res)

    @mock.patch.object(linuxscsi, 'LOG')
    @mock.patch('os_brick.privileged.rootwrap.execute')
    def test_is_multipath_running_failure_not_enforcing_multipath(
            self, mock_exec, mock_log):
        mock_exec.side_effect = putils.ProcessExecutionError()
        res = linuxscsi.LinuxSCSI.is_multipath_running(None, mock_exec)
        mock_exec.assert_called_once_with(
            'multipathd', 'show', 'status', run_as_root=True,
            root_helper=None)
        self.assertFalse(res)
        mock_log.error.assert_not_called()

    @mock.patch.object(linuxscsi, 'LOG')
    @mock.patch('os_brick.privileged.rootwrap.execute')
    def test_is_multipath_running_failure_not_enforcing_exit_code_0(
            self, mock_exec, mock_log):
        mock_exec.return_value = ('error receiving packet', '')
        res = linuxscsi.LinuxSCSI.is_multipath_running(None, mock_exec)
        mock_exec.assert_called_once_with(
            'multipathd', 'show', 'status', run_as_root=True,
            root_helper=None)
        self.assertFalse(res)
        mock_log.error.assert_not_called()

    def test_get_device_info(self):
        ret = "[1:1:0:0] disk Vendor Array 0100 /dev/adevice\n"
        with mock.patch.object(self.linuxscsi, '_execute') as exec_mock:
            exec_mock.return_value = (ret, "")
            info = self.linuxscsi.get_device_info('/dev/adevice')
            exec_mock.assert_called_once_with('lsscsi')
        self.assertEqual(info, {'channel': '1',
                                'device': '/dev/adevice',
                                'host': '1',
                                'id': '0',
                                'lun': '0'})

    @mock.patch('builtins.open')
    def test_get_sysfs_wwn_mpath(self, open_mock):
        wwn = '3600d0230000000000e13955cc3757800'
        cm_open = open_mock.return_value.__enter__.return_value
        cm_open.read.return_value = 'mpath-' + wwn

        res = self.linuxscsi.get_sysfs_wwn(mock.sentinel.device_names, 'dm-1')
        open_mock.assert_called_once_with('/sys/block/dm-1/dm/uuid')
        self.assertEqual(wwn, res)

    @mock.patch('glob.glob')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_sysfs_wwid')
    def test_get_sysfs_wwn_single_designator(self, get_wwid_mock, glob_mock):
        glob_mock.return_value = ['/dev/disk/by-id/scsi-wwid1',
                                  '/dev/disk/by-id/scsi-wwid2']
        get_wwid_mock.return_value = 'wwid1'
        res = self.linuxscsi.get_sysfs_wwn(mock.sentinel.device_names)
        self.assertEqual('wwid1', res)
        glob_mock.assert_called_once_with('/dev/disk/by-id/scsi-*')
        get_wwid_mock.assert_called_once_with(mock.sentinel.device_names)

    @mock.patch('builtins.open', side_effect=Exception)
    @mock.patch('glob.glob')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_sysfs_wwid')
    def test_get_sysfs_wwn_mpath_exc(self, get_wwid_mock, glob_mock,
                                     open_mock):
        glob_mock.return_value = ['/dev/disk/by-id/scsi-wwid1',
                                  '/dev/disk/by-id/scsi-wwid2']
        get_wwid_mock.return_value = 'wwid1'
        res = self.linuxscsi.get_sysfs_wwn(mock.sentinel.device_names, 'dm-1')
        open_mock.assert_called_once_with('/sys/block/dm-1/dm/uuid')
        self.assertEqual('wwid1', res)
        glob_mock.assert_called_once_with('/dev/disk/by-id/scsi-*')
        get_wwid_mock.assert_called_once_with(mock.sentinel.device_names)

    @mock.patch('os.listdir', return_value=['sda', 'sdd'])
    @mock.patch('os.path.realpath', side_effect=('/other/path',
                                                 '/dev/dm-5',
                                                 '/dev/sda', '/dev/sdb'))
    @mock.patch('os.path.islink', side_effect=(False,) + (True,) * 5)
    @mock.patch('os.stat', side_effect=(False,) + (True,) * 4)
    @mock.patch('glob.glob')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_sysfs_wwid')
    def test_get_sysfs_wwn_multiple_designators(self, get_wwid_mock,
                                                glob_mock, stat_mock,
                                                islink_mock, realpath_mock,
                                                listdir_mock):
        glob_mock.return_value = ['/dev/disk/by-id/scsi-fail-link',
                                  '/dev/disk/by-id/scsi-fail-stat',
                                  '/dev/disk/by-id/scsi-non-dev',
                                  '/dev/disk/by-id/scsi-another-dm',
                                  '/dev/disk/by-id/scsi-wwid1',
                                  '/dev/disk/by-id/scsi-wwid2']
        get_wwid_mock.return_value = 'pre-wwid'
        devices = ['sdb', 'sdc']
        res = self.linuxscsi.get_sysfs_wwn(devices)
        self.assertEqual('wwid2', res)
        glob_mock.assert_called_once_with('/dev/disk/by-id/scsi-*')
        listdir_mock.assert_called_once_with('/sys/class/block/dm-5/slaves')
        get_wwid_mock.assert_called_once_with(devices)

    @mock.patch('os.listdir', side_effect=[['sda', 'sdb'], ['sdc', 'sdd']])
    @mock.patch('os.path.realpath', side_effect=('/dev/sde',
                                                 '/dev/dm-5',
                                                 '/dev/dm-6'))
    @mock.patch('os.path.islink', mock.Mock())
    @mock.patch('os.stat', mock.Mock())
    @mock.patch('glob.glob')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_sysfs_wwid',
                       return_value='')
    def test_get_sysfs_wwn_dm_link(self, get_wwid_mock, glob_mock,
                                   realpath_mock, listdir_mock):
        glob_mock.return_value = ['/dev/disk/by-id/scsi-wwid1',
                                  '/dev/disk/by-id/scsi-another-dm',
                                  '/dev/disk/by-id/scsi-our-dm']
        devices = ['sdc', 'sdd']
        res = self.linuxscsi.get_sysfs_wwn(devices)
        self.assertEqual('our-dm', res)
        glob_mock.assert_called_once_with('/dev/disk/by-id/scsi-*')
        listdir_mock.assert_has_calls(
            [mock.call('/sys/class/block/dm-5/slaves'),
             mock.call('/sys/class/block/dm-6/slaves')])
        get_wwid_mock.assert_called_once_with(devices)

    @mock.patch('os.path.realpath', side_effect=('/dev/sda', '/dev/sdb'))
    @mock.patch('os.path.islink', return_value=True)
    @mock.patch('os.stat', return_value=True)
    @mock.patch('glob.glob')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_sysfs_wwid')
    def test_get_sysfs_wwn_not_found(self, get_wwid_mock, glob_mock,
                                     stat_mock, islink_mock, realpath_mock):
        glob_mock.return_value = ['/dev/disk/by-id/scsi-wwid1',
                                  '/dev/disk/by-id/scsi-wwid2']
        get_wwid_mock.return_value = 'pre-wwid'
        devices = ['sdc']
        res = self.linuxscsi.get_sysfs_wwn(devices)
        self.assertEqual('', res)
        glob_mock.assert_called_once_with('/dev/disk/by-id/scsi-*')
        get_wwid_mock.assert_called_once_with(devices)

    @mock.patch('glob.glob', return_value=[])
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_sysfs_wwid')
    def test_get_sysfs_wwn_no_links(self, get_wwid_mock, glob_mock):
        get_wwid_mock.return_value = ''
        devices = ['sdc']
        res = self.linuxscsi.get_sysfs_wwn(devices)
        self.assertEqual('', res)
        glob_mock.assert_called_once_with('/dev/disk/by-id/scsi-*')
        get_wwid_mock.assert_called_once_with(devices)

    @ddt.data({'wwn_type': 't10.', 'num_val': '1'},
              {'wwn_type': 'eui.', 'num_val': '2'},
              {'wwn_type': 'naa.', 'num_val': '3'})
    @ddt.unpack
    @mock.patch('builtins.open')
    def test_get_sysfs_wwid(self, open_mock, wwn_type, num_val):
        read_fail = mock.MagicMock()
        read_fail.__enter__.return_value.read.side_effect = IOError
        read_data = mock.MagicMock()
        read_data.__enter__.return_value.read.return_value = (wwn_type +
                                                              'wwid1\n')
        open_mock.side_effect = (IOError, read_fail, read_data)

        res = self.linuxscsi.get_sysfs_wwid(['sda', 'sdb', 'sdc'])
        self.assertEqual(num_val + 'wwid1', res)
        open_mock.assert_has_calls([mock.call('/sys/block/sda/device/wwid'),
                                    mock.call('/sys/block/sdb/device/wwid'),
                                    mock.call('/sys/block/sdc/device/wwid')])

    @mock.patch('builtins.open', side_effect=IOError)
    def test_get_sysfs_wwid_not_found(self, open_mock):
        res = self.linuxscsi.get_sysfs_wwid(['sda', 'sdb'])
        self.assertEqual('', res)
        open_mock.assert_has_calls([mock.call('/sys/block/sda/device/wwid'),
                                    mock.call('/sys/block/sdb/device/wwid')])

    @mock.patch.object(linuxscsi.priv_rootwrap, 'unlink_root')
    @mock.patch('glob.glob')
    @mock.patch('os.path.realpath', side_effect=['/dev/sda', '/dev/sdb',
                                                 '/dev/sdc'])
    def test_remove_scsi_symlinks(self, realpath_mock, glob_mock,
                                  unlink_mock):
        paths = ['/dev/disk/by-id/scsi-wwid1', '/dev/disk/by-id/scsi-wwid2',
                 '/dev/disk/by-id/scsi-wwid3']
        glob_mock.return_value = paths
        self.linuxscsi._remove_scsi_symlinks(['sdb', 'sdc', 'sdd'])
        glob_mock.assert_called_once_with('/dev/disk/by-id/scsi-*')
        realpath_mock.assert_has_calls([mock.call(g) for g in paths])
        unlink_mock.assert_called_once_with(no_errors=True, *paths[1:])

    @mock.patch.object(linuxscsi.priv_rootwrap, 'unlink_root')
    @mock.patch('glob.glob')
    @mock.patch('os.path.realpath', side_effect=['/dev/sda', '/dev/sdb'])
    def test_remove_scsi_symlinks_no_links(self, realpath_mock, glob_mock,
                                           unlink_mock):
        paths = ['/dev/disk/by-id/scsi-wwid1', '/dev/disk/by-id/scsi-wwid2']
        glob_mock.return_value = paths
        self.linuxscsi._remove_scsi_symlinks(['sdd', 'sde'])
        glob_mock.assert_called_once_with('/dev/disk/by-id/scsi-*')
        realpath_mock.assert_has_calls([mock.call(g) for g in paths])
        unlink_mock.assert_not_called()

    @mock.patch.object(linuxscsi.priv_rootwrap, 'unlink_root')
    @mock.patch('glob.glob')
    @mock.patch('os.path.realpath', side_effect=[OSError, '/dev/sda'])
    def test_remove_scsi_symlinks_race_condition(self, realpath_mock,
                                                 glob_mock, unlink_mock):
        paths = ['/dev/disk/by-id/scsi-wwid1', '/dev/disk/by-id/scsi-wwid2']
        glob_mock.return_value = paths
        self.linuxscsi._remove_scsi_symlinks(['sda'])
        glob_mock.assert_called_once_with('/dev/disk/by-id/scsi-*')
        realpath_mock.assert_has_calls([mock.call(g) for g in paths])
        unlink_mock.assert_called_once_with(paths[1], no_errors=True)

    @mock.patch('glob.glob')
    def test_get_hctl_with_target(self, glob_mock):
        glob_mock.return_value = [
            '/sys/class/iscsi_host/host3/device/session1/target3:4:5',
            '/sys/class/iscsi_host/host3/device/session1/target3:4:6']
        res = self.linuxscsi.get_hctl('1', '2')
        self.assertEqual(('3', '4', '5', '2'), res)
        glob_mock.assert_called_once_with(
            '/sys/class/iscsi_host/host*/device/session1/target*')

    @mock.patch('glob.glob')
    def test_get_hctl_no_target(self, glob_mock):
        glob_mock.side_effect = [
            [],
            ['/sys/class/iscsi_host/host3/device/session1',
             '/sys/class/iscsi_host/host3/device/session1']]
        res = self.linuxscsi.get_hctl('1', '2')
        self.assertEqual(('3', '-', '-', '2'), res)
        glob_mock.assert_has_calls(
            [mock.call('/sys/class/iscsi_host/host*/device/session1/target*'),
             mock.call('/sys/class/iscsi_host/host*/device/session1')])

    @mock.patch('glob.glob', return_value=[])
    def test_get_hctl_no_paths(self, glob_mock):
        res = self.linuxscsi.get_hctl('1', '2')
        self.assertIsNone(res)
        glob_mock.assert_has_calls(
            [mock.call('/sys/class/iscsi_host/host*/device/session1/target*'),
             mock.call('/sys/class/iscsi_host/host*/device/session1')])

    @mock.patch('glob.glob')
    def test_device_name_by_hctl(self, glob_mock):
        glob_mock.return_value = [
            '/sys/class/scsi_host/host3/device/session1/target3:4:5/3:4:5:2/'
            'block/sda2',
            '/sys/class/scsi_host/host3/device/session1/target3:4:5/3:4:5:2/'
            'block/sda']
        res = self.linuxscsi.device_name_by_hctl('1', ('3', '4', '5', '2'))
        self.assertEqual('sda', res)
        glob_mock.assert_called_once_with(
            '/sys/class/scsi_host/host3/device/session1/target3:4:5/3:4:5:2/'
            'block/*')

    @mock.patch('glob.glob')
    def test_device_name_by_hctl_wildcards(self, glob_mock):
        glob_mock.return_value = [
            '/sys/class/scsi_host/host3/device/session1/target3:4:5/3:4:5:2/'
            'block/sda2',
            '/sys/class/scsi_host/host3/device/session1/target3:4:5/3:4:5:2/'
            'block/sda']
        res = self.linuxscsi.device_name_by_hctl('1', ('3', '-', '-', '2'))
        self.assertEqual('sda', res)
        glob_mock.assert_called_once_with(
            '/sys/class/scsi_host/host3/device/session1/target3:*:*/3:*:*:2/'
            'block/*')

    @mock.patch('glob.glob', mock.Mock(return_value=[]))
    def test_device_name_by_hctl_no_devices(self):
        res = self.linuxscsi.device_name_by_hctl('1', ('4', '5', '6', '2'))
        self.assertIsNone(res)

    @mock.patch.object(linuxscsi.LinuxSCSI, 'echo_scsi_command')
    def test_scsi_iscsi(self, echo_mock):
        self.linuxscsi.scan_iscsi('host', 'channel', 'target', 'lun')
        echo_mock.assert_called_once_with('/sys/class/scsi_host/hosthost/scan',
                                          'channel target lun')

    def test_multipath_add_wwid(self):
        self.linuxscsi.multipath_add_wwid('wwid1')
        self.assertEqual(['multipath -a wwid1'], self.cmds)

    def test_multipath_add_path(self):
        self.linuxscsi.multipath_add_path('/dev/sda')
        self.assertEqual(['multipathd add path /dev/sda'], self.cmds)

    def test_multipath_del_path(self):
        self.linuxscsi.multipath_del_path('/dev/sda')
        self.assertEqual(['multipathd del path /dev/sda'], self.cmds)

    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_dm_name', return_value=None)
    def test_multipath_del_map_not_present(self, name_mock):
        self.linuxscsi.multipath_del_map('dm-7')
        self.assertEqual([], self.cmds)
        name_mock.assert_called_once_with('dm-7')

    @mock.patch.object(linuxscsi.LinuxSCSI, '_execute')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_dm_name', return_value=None)
    def test_multipath_del_map(self, name_mock, exec_mock):
        exec_mock.side_effect = [putils.ProcessExecutionError, None]
        mpath_name = '3600d0230000000000e13955cc3757800'
        name_mock.side_effect = [mpath_name, mpath_name, None]
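        # Editor's note: the first "multipathd del map" attempt fails while
        # get_dm_name still reports the map, so the call is retried; the
        # second attempt succeeds and the final get_dm_name returning None
        # confirms removal (hence 2 exec calls and 3 name lookups below).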
        self.linuxscsi.multipath_del_map('dm-7')
        self.assertEqual(2, exec_mock.call_count)
        exec_mock.assert_has_calls(
            [mock.call('multipathd', 'del', 'map', mpath_name,
                       run_as_root=True, timeout=5,
                       root_helper=self.linuxscsi._root_helper)] * 2)
        self.assertEqual(3, name_mock.call_count)
        name_mock.assert_has_calls([mock.call('dm-7')] * 3)

    @mock.patch.object(linuxscsi.LinuxSCSI, '_execute')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_dm_name')
    def test_multipath_del_map_retries_cmd_fails(self, name_mock, exec_mock):
        exec_mock.side_effect = putils.ProcessExecutionError
        mpath_name = '3600d0230000000000e13955cc3757800'
        name_mock.return_value = mpath_name
        self.assertRaises(putils.ProcessExecutionError,
                          self.linuxscsi.multipath_del_map, 'dm-7')
        self.assertEqual(3, exec_mock.call_count)
        exec_mock.assert_has_calls(
            [mock.call('multipathd', 'del', 'map', mpath_name,
                       run_as_root=True, timeout=5,
                       root_helper=self.linuxscsi._root_helper)] * 3)
        self.assertEqual(3, name_mock.call_count)
        name_mock.assert_has_calls([mock.call('dm-7')] * 3)

    @mock.patch.object(linuxscsi.LinuxSCSI, '_execute')
    @mock.patch.object(linuxscsi.LinuxSCSI, 'get_dm_name')
    def test_multipath_del_map_retries_remains(self, name_mock, exec_mock):
        mpath_name = '3600d0230000000000e13955cc3757800'
        name_mock.return_value = mpath_name
        self.assertRaises(exception.BrickException,
                          self.linuxscsi.multipath_del_map, 'dm-7')
        self.assertEqual(3, exec_mock.call_count)
        exec_mock.assert_has_calls(
            [mock.call('multipathd', 'del', 'map', mpath_name,
                       run_as_root=True, timeout=5,
                       root_helper=self.linuxscsi._root_helper)] * 3)
        self.assertEqual(6, name_mock.call_count)
        name_mock.assert_has_calls([mock.call('dm-7')] * 6)

    @ddt.data(('/dev/sda', '/dev/sda', False, True, None),
              # This checks that we ignore the was_multipath parameter if it
              # doesn't make sense (because the used path is the one we are
              # asking about)
              ('/dev/sda', '/dev/sda', True, True, None),
              ('/dev/sda', '', True, False, None),
              # Check for encrypted volume
              ('/dev/link_sda', '/dev/disk/by-path/pci-XYZ', False, True,
               ('/dev/sda', '/dev/mapper/crypt-pci-XYZ')),
              ('/dev/link_sda', '/dev/link_sdb', False, False,
               ('/dev/sda', '/dev/sdb')),
              ('/dev/link_sda', '/dev/link2_sda', False, True,
               ('/dev/sda', '/dev/sda')))
    @ddt.unpack
    def test_requires_flush(self, path, path_used, was_multipath, expected,
                            real_paths):
        with mock.patch('os.path.realpath', side_effect=real_paths) as mocked:
            self.assertEqual(
                expected,
                self.linuxscsi.requires_flush(path, path_used,
                                              was_multipath))
            if real_paths:
                mocked.assert_has_calls([mock.call(path),
                                         mock.call(path_used)])

    @ddt.data(None, 'SAM', 'transparent')
    def test_lun_for_addressing_transparent_sam(self, mode):
        lun = self.linuxscsi.lun_for_addressing(1, mode)
        self.assertEqual(1, lun)

        lun = self.linuxscsi.lun_for_addressing(256, mode)
        self.assertEqual(256, lun)

    @ddt.data(1, 'SAM3', 'TRANSPARENT', 'sam', 'sam2')
    def test_lun_for_addressing_bad(self, mode):
        self.assertRaises(exception.InvalidParameterValue,
                          self.linuxscsi.lun_for_addressing, 1, mode)

    @ddt.data((1, 1), (100, 100), (256, 16640), (1010, 17394))
    @ddt.unpack
    def test_lun_for_addressing_sam2(self, original_lun, expected_lun):
        lun = self.linuxscsi.lun_for_addressing(original_lun, 'SAM2')
        self.assertEqual(expected_lun, lun)

    @ddt.data((0, 16384), (100, 16484), (256, 16640), (1010, 17394))
    @ddt.unpack
    def test_lun_for_addressing_sam3_flat(self, original_lun, expected_lun):
        lun = self.linuxscsi.lun_for_addressing(original_lun, 'SAM3-flat')
        self.assertEqual(expected_lun, lun)

    @mock.patch.object(linuxscsi, 'LOG')
    @mock.patch.object(linuxscsi.LinuxSCSI, '_execute')
    def test_wait_for_mpath_device(self, exec_mock, mock_log):
        exec_mock.return_value = (
            "3293379.070675 | /dev/dm-7: path sdb is usable",
            None,
        )
        mpath_name = 'dm-7'
        self.linuxscsi.wait_for_mpath_device(mpath_name)
        self.assertEqual(1, exec_mock.call_count)
        exec_mock.assert_called_once_with(
            'multipath', '-C', mpath_name,
            attempts=4, interval=1, run_as_root=True,
            root_helper=self.linuxscsi._root_helper)
        mock_log.error.assert_not_called()

    @mock.patch.object(linuxscsi, 'LOG')
    @mock.patch.object(linuxscsi.LinuxSCSI, '_execute')
    def test_wait_for_mpath_device_fails(self, exec_mock, mock_log):
        exec_mock.side_effect = putils.ProcessExecutionError
        mpath_name = 'dm-7'
        exc = self.assertRaises(putils.ProcessExecutionError,
                                self.linuxscsi.wait_for_mpath_device,
                                'dm-7')
        exec_mock.assert_called_once_with(
            'multipath', '-C', mpath_name,
            attempts=4, interval=1, run_as_root=True,
            root_helper=self.linuxscsi._root_helper)
        mock_log.error.assert_called_once_with(
            "Failed to get mpath device %(mpath)s ready for "
            "I/O: %(except)s",
            {'mpath': mpath_name, 'except': exc})

os_brick-6.11.0/os_brick/tests/initiator/test_storpool_utils.py

# Copyright (c) 2015 - 2024 StorPool
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
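# Editor's note: these tests never touch the real filesystem; builtins.open,
# platform.node and pathlib.Path are replaced below with _fake_open,
# _fake_node and FakePath, so StorPool config parsing is driven entirely by
# the in-memory INI snippets defined in this module.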
import copy
import json
import os
from unittest import mock

from os_brick import exception
from os_brick.initiator import storpool_utils
from os_brick.tests import base


STORPOOL_CONF_INI_NO_HOSTNAME = """\
SP_API_HTTP_HOST=127.0.0.1
SP_API_HTTP_PORT=81
SP_AUTH_TOKEN=1234567890123456789

[another-node]
SP_OURID=2
"""

STORPOOL_CONF_INI = STORPOOL_CONF_INI_NO_HOSTNAME + """\
[this-node]
SP_OURID=1
"""

STORPOOL_CONF_INI_SELECTOR = "full"

ANOTHER_CONF_INI = """\
SP_API_HTTP_HOST=127.0.100.1
SP_API_HTTP_PORT=8080
"""

SP_CONF = {
    'SP_API_HTTP_HOST': '127.0.100.1',
    'SP_API_HTTP_PORT': '8080',
    'SP_AUTH_TOKEN': '1234567890123456789',
    'SP_OURID': '1'
}


def faulty_api(req):
    faulty_api.real_fn(req)
    if faulty_api.fail_count > 0:
        faulty_api.fail_count -= 1
        raise storpool_utils.StorPoolAPIError(
            500,
            {
                'error': {
                    'name': 'busy',
                    'descr': "'os--volume--sp-vol-1' is open at client 19"
                }
            })


def _fake_open(path):
    data = ""
    if path.name == '/etc/storpool.conf':
        if STORPOOL_CONF_INI_SELECTOR == 'full':
            data = STORPOOL_CONF_INI
        if STORPOOL_CONF_INI_SELECTOR == 'no-hostname':
            data = STORPOOL_CONF_INI_NO_HOSTNAME
    elif path.name == '/etc/storpool.conf.d/another.conf':
        data = ANOTHER_CONF_INI
    else:
        raise Exception(f"Called open with an unexpected path: {path}")

    open_mock = mock.Mock()
    open_mock.read = lambda: data

    ctx_mock = mock.Mock()
    ctx_mock.__enter__ = mock.Mock(return_value=open_mock)
    ctx_mock.__exit__ = mock.Mock()

    return ctx_mock


def _fake_node():
    return 'this-node'


class FakePath:
    def __init__(self, name, exists, is_file, dir_contents=None):
        self.name = name
        self.exists = exists
        self.is_a_file = is_file
        self.dir_contents = dir_contents

    def is_file(self):
        return self.exists and self.is_a_file

    def is_dir(self):
        return self.exists and not self.is_a_file

    def iterdir(self):
        if self.dir_contents is None:
            raise Exception(
                f"Called iterdir() on a non-directory: {self.name}")
        return self.dir_contents

    def __str__(self):
        return self.name


@mock.patch('builtins.open', _fake_open)
@mock.patch('platform.node', _fake_node)
class StorPoolConfTestCase(base.TestCase):
    def setUp(self):
        super(StorPoolConfTestCase, self).setUp()

        self.mock_path = mock.Mock()
        self.fs_tree = [
            FakePath('/etc/storpool.conf', True, True),
            FakePath('/etc/storpool.conf.d', True, False, [])
        ]

    def test_subconf_overrides_main(self):
        self.fs_tree[1] = FakePath('/etc/storpool.conf.d', True, False, [
            FakePath('/etc/storpool.conf.d/another.conf', True, True)
        ])
        self._fs_init(self.fs_tree, 'full')

        with mock.patch('pathlib.Path', self.mock_path):
            conf = storpool_utils.get_conf()

        self.assertEqual(SP_CONF, conf)

    def test_only_storpool_conf(self):
        self._fs_init(self.fs_tree, 'full')

        sp_conf_expected = copy.deepcopy(SP_CONF)
        sp_conf_expected['SP_API_HTTP_HOST'] = '127.0.0.1'
        sp_conf_expected['SP_API_HTTP_PORT'] = '81'

        with mock.patch('pathlib.Path', self.mock_path):
            conf = storpool_utils.get_conf()

        self.assertEqual(sp_conf_expected, conf)

    def test_env_overrides_main(self):
        self._fs_init(self.fs_tree, 'full')

        overrides_expected = {
            'SP_API_HTTP_HOST': '192.168.0.10',
            'SP_API_HTTP_PORT': '8123'
        }

        sp_conf_expected = copy.deepcopy(SP_CONF)
        sp_conf_expected.update(overrides_expected)

        with (mock.patch('pathlib.Path', self.mock_path),
              mock.patch.dict(os.environ, overrides_expected)):
            conf = storpool_utils.get_conf()

        self.assertEqual(sp_conf_expected, conf)

    def test_raise_if_no_storpool_conf(self):
        self.fs_tree[0] = FakePath('/etc/storpool.conf', False, True)
        self._fs_init(self.fs_tree, 'full')

        with mock.patch('pathlib.Path', self.mock_path):
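            # Editor's note: FakePath reports /etc/storpool.conf as missing
            # here, so get_conf() is expected to raise BrickException.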
            self.assertRaises(exception.BrickException,
                              storpool_utils.get_conf)

    def _fs_init(self, fs, storpool_conf_type):
        global STORPOOL_CONF_INI_SELECTOR
        STORPOOL_CONF_INI_SELECTOR = storpool_conf_type
        self.mock_path.side_effect = fs


class StorPoolAPITestCase(base.TestCase):
    def setUp(self):
        super(StorPoolAPITestCase, self).setUp()
        self.api = storpool_utils.StorPoolAPI(
            '127.0.0.1', '81', '1234567890123456789')

    def test_api_ok(self):
        with mock.patch('http.client.HTTPConnection') as connection_mock:
            resp = mock.Mock()
            c_mock = connection_mock.return_value
            c_mock.getresponse = mock.Mock(return_value=resp)

            resp.status = 200
            resp.read = lambda: '{ "data": [{ "name": "test-volume" }] }'

            self.assertEqual(self.api.volumes_list(),
                             [{'name': 'test-volume'}])

    def test_api_exceptions(self):
        with mock.patch('http.client.HTTPConnection') as connection_mock:
            resp = mock.Mock()
            c_mock = connection_mock.return_value
            c_mock.getresponse = mock.Mock(return_value=resp)

            resp.status = 200
            resp.read = lambda: '{}'
            self.assertRaises(KeyError, self.api.volumes_list)

            resp.read = lambda: '{/}'
            self.assertRaises(json.JSONDecodeError, self.api.volumes_list)

            resp.read = lambda: '{ "error": { "transient": true } }'
            self.assertRaises(storpool_utils.StorPoolAPIError,
                              self.api.volumes_list)

    def test_api_handle_transient(self):
        with mock.patch('http.client.HTTPConnection') as connection_mock:
            resp = mock.Mock()
            resp.status = 500
            resp.read = lambda: '{ "error": { "transient": true } }'

            resp1 = mock.Mock()
            resp1.status = 200
            resp1.read = lambda: '{ "data": [{ "name": "test-volume" }] }'

            c_mock = connection_mock.return_value
            c_mock.getresponse = mock.Mock(side_effect=[resp, resp, resp1])

            self.assertEqual(self.api.volumes_list(),
                             [{'name': 'test-volume'}])

os_brick-6.11.0/os_brick/tests/initiator/test_utils.py

# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
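# Editor's note: ISCSI_SUPPORTS_MANUAL_SCAN is module-level state normally
# set by check_manual_scan(); the guard_connection tests below assign it
# directly to exercise both the locking and the lock-free code paths.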
from unittest import mock

from os_brick.initiator import utils
from os_brick.tests import base


class InitiatorUtilsTestCase(base.TestCase):

    @mock.patch('os.name', 'nt')
    def test_check_manual_scan_windows(self):
        self.assertFalse(utils.check_manual_scan())

    @mock.patch('os.name', 'posix')
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_check_manual_scan_supported(self, mock_exec):
        self.assertTrue(utils.check_manual_scan())
        mock_exec.assert_called_once_with('grep', '-F', 'node.session.scan',
                                          '/sbin/iscsiadm')

    @mock.patch('os.name', 'posix')
    @mock.patch('oslo_concurrency.processutils.execute',
                side_effect=utils.putils.ProcessExecutionError)
    def test_check_manual_scan_not_supported(self, mock_exec):
        self.assertFalse(utils.check_manual_scan())
        mock_exec.assert_called_once_with('grep', '-F', 'node.session.scan',
                                          '/sbin/iscsiadm')

    @mock.patch('oslo_concurrency.lockutils.lock')
    def test_guard_connection_manual_scan_support(self, mock_lock):
        utils.ISCSI_SUPPORTS_MANUAL_SCAN = True
        # We confirm that shared_targets is ignored
        with utils.guard_connection({'shared_targets': True}):
            mock_lock.assert_not_called()

    @mock.patch('oslo_concurrency.lockutils.lock')
    def test_guard_connection_manual_scan_support_forced(self, mock_lock):
        """Guard locks when cinder forces locking."""
        utils.ISCSI_SUPPORTS_MANUAL_SCAN = True
        # We confirm that shared_targets is ignored
        with utils.guard_connection({'service_uuid': mock.sentinel.uuid,
                                     'shared_targets': None}):
            mock_lock.assert_called_once_with(mock.sentinel.uuid,
                                              'os-brick-', external=True)

    @mock.patch('oslo_concurrency.lockutils.lock')
    def test_guard_connection_manual_scan_unsupported_not_shared(self,
                                                                 mock_lock):
        utils.ISCSI_SUPPORTS_MANUAL_SCAN = False
        with utils.guard_connection({'shared_targets': False}):
            mock_lock.assert_not_called()

    @mock.patch('oslo_concurrency.lockutils.lock')
    def test_guard_connection_manual_scan_unsupported_shared(self, mock_lock):
        utils.ISCSI_SUPPORTS_MANUAL_SCAN = False
        with utils.guard_connection({'service_uuid': mock.sentinel.uuid,
                                     'shared_targets': True}):
            mock_lock.assert_called_once_with(mock.sentinel.uuid,
                                              'os-brick-', external=True)

os_brick-6.11.0/os_brick/tests/local_dev/
os_brick-6.11.0/os_brick/tests/local_dev/__init__.py
os_brick-6.11.0/os_brick/tests/local_dev/fake_lvm.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
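# Editor's note: FakeBrickLVM is a hand-rolled test double standing in for
# os_brick.local_dev.lvm.LVM; it is assumed to mirror only the subset of
# that interface which consuming tests actually exercise, returning canned
# values instead of shelling out to the LVM tools.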
class FakeBrickLVM(object):
    """Logs and records calls, for unit tests."""

    def __init__(self, vg_name, create, pv_list, vtype, execute=None):
        super(FakeBrickLVM, self).__init__()
        self.vg_size = '5.00'
        self.vg_free_space = '5.00'
        self.vg_name = vg_name

    def supports_thin_provisioning(self):
        return False

    def get_volumes(self):
        return ['fake-volume']

    def get_volume(self, name):
        return ['name']

    def get_all_physical_volumes(self, vg_name=None):
        return []

    def get_physical_volumes(self):
        return []

    def update_volume_group_info(self):
        pass

    def create_thin_pool(self, name=None, size_str=0):
        pass

    def create_volume(self, name, size_str, lv_type='default',
                      mirror_count=0):
        pass

    def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
        pass

    def delete(self, name):
        pass

    def revert(self, snapshot_name):
        pass

    def lv_has_snapshot(self, name):
        return False

    def activate_lv(self, lv, is_snapshot=False, permanent=False):
        pass

    def rename_volume(self, lv_name, new_name):
        pass

os_brick-6.11.0/os_brick/tests/local_dev/test_brick_lvm.py

# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
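# Editor's note: BrickLvmTestCase fakes out command execution by joining
# each command's arguments with ", " and pattern-matching the resulting
# string (see fake_execute below), so the canned vgs/lvs/pvs outputs here
# track the exact argument order the LVM wrapper is expected to use.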
from unittest import mock from oslo_concurrency import processutils from os_brick import exception from os_brick import executor as os_brick_executor from os_brick.local_dev import lvm as brick from os_brick.privileged import rootwrap as priv_rootwrap from os_brick.tests import base class BrickLvmTestCase(base.TestCase): def setUp(self): super(BrickLvmTestCase, self).setUp() if not hasattr(self, 'configuration'): self.configuration = mock.Mock() self.configuration.lvm_suppress_fd_warnings = False self.volume_group_name = 'fake-vg' # Stub processutils.execute for static methods self.mock_object(priv_rootwrap, 'execute', self.fake_execute) self.vg = brick.LVM( self.volume_group_name, 'sudo', create_vg=False, physical_volumes=None, lvm_type='default', executor=self.fake_execute, suppress_fd_warn=self.configuration.lvm_suppress_fd_warnings) def failed_fake_execute(obj, *cmd, **kwargs): return ("\n", "fake-error") def fake_pretend_lvm_version(obj, *cmd, **kwargs): return (" LVM version: 2.03.00 (2012-03-06)\n", "") def fake_old_lvm_version(obj, *cmd, **kwargs): # Does not support thin prov or snap activation return (" LVM version: 2.02.65(2) (2012-03-06)\n", "") def fake_customised_lvm_version(obj, *cmd, **kwargs): return (" LVM version: 2.02.100(2)-RHEL6 (2013-09-12)\n", "") def fake_f23_lvm_version(obj, *cmd, **kwargs): return (" LVM version: 2.02.132(2) (2015-09-22)\n", "") def fake_execute(obj, *cmd, **kwargs): # TODO(eharney): remove this and move to per-test mocked execute calls if obj.configuration.lvm_suppress_fd_warnings: _lvm_prefix = 'env, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=1, ' else: _lvm_prefix = 'env, LC_ALL=C, ' cmd_string = ', '.join(cmd) data = "\n" if (_lvm_prefix + 'vgs, --noheadings, --unit=g, -o, name' == cmd_string): data = " fake-vg\n" data += " some-other-vg\n" elif (_lvm_prefix + 'vgs, --noheadings, -o, name, fake-vg' == cmd_string): data = " fake-vg\n" elif _lvm_prefix + 'vgs, --version' in cmd_string: data = " LVM version: 2.02.95(2) (2012-03-06)\n" elif (_lvm_prefix + 'vgs, --noheadings, -o, uuid, fake-vg' in cmd_string): data = " kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n" elif _lvm_prefix + 'vgs, --noheadings, --unit=g, ' \ '-o, name,size,free,lv_count,uuid, ' \ '--separator, :, --nosuffix' in cmd_string: data = (" test-prov-cap-vg-unit:10.00:10.00:0:" "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n") if 'test-prov-cap-vg-unit' in cmd_string: return (data, "") data = (" test-prov-cap-vg-no-unit:10.00:10.00:0:" "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n") if 'test-prov-cap-vg-no-unit' in cmd_string: return (data, "") data = " fake-vg:10.00:10.00:0:"\ "kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n" if 'fake-vg' in cmd_string: return (data, "") data += " fake-vg-2:10.00:10.00:0:"\ "lWyauW-dKpG-Rz7E-xtKY-jeju-QsYU-SLG7Z2\n" data += " fake-vg-3:10.00:10.00:0:"\ "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z3\n" elif (_lvm_prefix + 'lvs, --noheadings, ' '--unit=g, -o, vg_name,name,size, --nosuffix, ' 'fake-vg/lv-nothere' in cmd_string): raise processutils.ProcessExecutionError( stderr="One or more specified logical volume(s) not found.") elif (_lvm_prefix + 'lvs, --noheadings, ' '--unit=g, -o, vg_name,name,size, --nosuffix, ' 'fake-vg/lv-newerror' in cmd_string): raise processutils.ProcessExecutionError( stderr="Failed to find logical volume \"fake-vg/lv-newerror\"") elif (_lvm_prefix + 'lvs, --noheadings, ' '--unit=g, -o, vg_name,name,size' in cmd_string): if 'fake-unknown' in cmd_string: raise processutils.ProcessExecutionError( stderr="One or more volume(s) not found." 
) if 'test-prov-cap-vg-unit' in cmd_string: data = " fake-vg test-prov-cap-pool-unit 9.50g\n" data += " fake-vg fake-volume-1 1.00g\n" data += " fake-vg fake-volume-2 2.00g\n" elif 'test-prov-cap-vg-no-unit' in cmd_string: data = " fake-vg test-prov-cap-pool-no-unit 9.50\n" data += " fake-vg fake-volume-1 1.00\n" data += " fake-vg fake-volume-2 2.00\n" elif 'test-found-lv-name' in cmd_string: data = " fake-vg test-found-lv-name 9.50\n" else: data = " fake-vg fake-1 1.00g\n" data += " fake-vg fake-2 1.00g\n" elif (_lvm_prefix + 'lvdisplay, --noheading, -C, -o, Attr' in cmd_string): if 'test-volumes' in cmd_string: data = ' wi-a-' else: data = ' owi-a-' elif _lvm_prefix + 'pvs, --noheadings' in cmd_string: data = " fake-vg|/dev/sda|10.00|1.00\n" data += " fake-vg|/dev/sdb|10.00|1.00\n" data += " fake-vg|/dev/sdc|10.00|8.99\n" data += " fake-vg-2|/dev/sdd|10.00|9.99\n" elif _lvm_prefix + 'lvs, --noheadings, --unit=g' \ ', -o, size,data_percent, --separator, :' in cmd_string: if 'test-prov-cap-pool' in cmd_string: data = " 9.5:20\n" else: data = " 9:12\n" elif 'lvcreate, -T, -L, ' in cmd_string: pass elif 'lvcreate, -T, -l, 100%FREE' in cmd_string: pass elif 'lvcreate, -T, -V, ' in cmd_string: pass elif 'lvcreate, -n, ' in cmd_string: pass elif 'lvcreate, --name, ' in cmd_string: pass elif 'lvextend, -L, ' in cmd_string: pass else: raise AssertionError('unexpected command called: %s' % cmd_string) return (data, "") def test_create_lv_snapshot(self): self.assertIsNone(self.vg.create_lv_snapshot('snapshot-1', 'fake-1')) with mock.patch.object(self.vg, 'get_volume', return_value=None): try: self.vg.create_lv_snapshot('snapshot-1', 'fake-non-existent') except exception.VolumeDeviceNotFound as e: self.assertEqual('fake-non-existent', e.kwargs['device']) else: self.fail("Exception not raised") def test_vg_exists(self): self.assertTrue(self.vg._vg_exists()) def test_get_vg_uuid(self): self.assertEqual('kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1', self.vg._get_vg_uuid()[0]) def test_get_all_volumes(self): out = self.vg.get_volumes() self.assertEqual('fake-1', out[0]['name']) self.assertEqual('1.00g', out[0]['size']) self.assertEqual('fake-vg', out[0]['vg']) def test_get_volume(self): self.assertEqual('fake-1', self.vg.get_volume('fake-1')['name']) def test_get_volume_none(self): self.assertIsNone(self.vg.get_volume('fake-unknown')) def test_get_lv_info_notfound(self): # lv-nothere will raise lvm < 2.102.112 exception self.assertEqual( [], self.vg.get_lv_info( 'sudo', vg_name='fake-vg', lv_name='lv-nothere') ) # lv-newerror will raise lvm > 2.102.112 exception self.assertEqual( [], self.vg.get_lv_info( 'sudo', vg_name='fake-vg', lv_name='lv-newerror') ) def test_get_lv_info_found(self): lv_info = [{'size': '9.50', 'name': 'test-found-lv-name', 'vg': 'fake-vg'}] self.assertEqual( lv_info, self.vg.get_lv_info( 'sudo', vg_name='fake-vg', lv_name='test-found-lv-name') ) def test_get_lv_info_no_lv_name(self): lv_info = [{'name': 'fake-1', 'size': '1.00g', 'vg': 'fake-vg'}, {'name': 'fake-2', 'size': '1.00g', 'vg': 'fake-vg'}] self.assertEqual( lv_info, self.vg.get_lv_info( 'sudo', vg_name='fake-vg') ) @mock.patch('tenacity.nap.sleep', mock.Mock()) @mock.patch.object(brick.putils, 'execute') def test_get_lv_info_retry(self, exec_mock): exec_mock.side_effect = ( processutils.ProcessExecutionError('', '', exit_code=139), ('vg name size', ''), ) self.assertEqual( [{'name': 'fake-1', 'size': '1.00g', 'vg': 'fake-vg'}, {'name': 'fake-2', 'size': '1.00g', 'vg': 'fake-vg'}], self.vg.get_lv_info('sudo', vg_name='vg', 
lv_name='name') ) @mock.patch('tenacity.nap.sleep', mock.Mock()) @mock.patch.object(os_brick_executor.Executor, '_execute') def test_get_thin_pool_free_space_retry(self, exec_mock): exec_mock.side_effect = ( processutils.ProcessExecutionError('', '', exit_code=139), ('15.84:50', ''), ) self.assertEqual( 7.92, self.vg._get_thin_pool_free_space('vg', 'thinpool') ) self.assertEqual(2, exec_mock.call_count) args = ['env', 'LC_ALL=C', 'lvs', '--noheadings', '--unit=g', '-o', 'size,data_percent', '--separator', ':', '--nosuffix', '/dev/vg/thinpool'] if self.configuration.lvm_suppress_fd_warnings: args.insert(2, 'LVM_SUPPRESS_FD_WARNINGS=1') lvs_call = mock.call(*args, root_helper='sudo', run_as_root=True) exec_mock.assert_has_calls([lvs_call, lvs_call]) def test_get_all_physical_volumes(self): # Filtered VG version pvs = self.vg.get_all_physical_volumes('sudo', 'fake-vg') self.assertEqual(3, len(pvs)) # Non-Filtered, all VG's pvs = self.vg.get_all_physical_volumes('sudo') self.assertEqual(4, len(pvs)) def test_get_physical_volumes(self): pvs = self.vg.get_physical_volumes() self.assertEqual(3, len(pvs)) def test_get_volume_groups(self): self.assertEqual(3, len(self.vg.get_all_volume_groups('sudo'))) self.assertEqual(1, len(self.vg.get_all_volume_groups('sudo', 'fake-vg'))) def test_thin_pool_creation_manual(self): # The size of fake-vg volume group is 10g, so the calculated thin # pool size should be 9.5g (95% of 10g). self.vg.create_thin_pool() def test_thin_pool_provisioned_capacity(self): self.vg.vg_thin_pool = "test-prov-cap-pool-unit" self.vg.vg_name = 'test-prov-cap-vg-unit' self.assertIsNone(self.vg.create_thin_pool(name=self.vg.vg_thin_pool)) self.assertEqual(9.50, self.vg.vg_thin_pool_size) self.assertEqual(7.6, self.vg.vg_thin_pool_free_space) self.assertEqual(3.0, self.vg.vg_provisioned_capacity) self.vg.vg_thin_pool = "test-prov-cap-pool-no-unit" self.vg.vg_name = 'test-prov-cap-vg-no-unit' self.assertIsNone(self.vg.create_thin_pool(name=self.vg.vg_thin_pool)) self.assertEqual(9.50, self.vg.vg_thin_pool_size) self.assertEqual(7.6, self.vg.vg_thin_pool_free_space) self.assertEqual(3.0, self.vg.vg_provisioned_capacity) def test_thin_pool_free_space(self): # The size of fake-vg-pool is 9g and the allocated data sums up to # 12% so the calculated free space should be 7.92 self.assertEqual(float("7.92"), self.vg._get_thin_pool_free_space("fake-vg", "fake-vg-pool")) def test_volume_create_after_thin_creation(self): """Test self.vg.vg_thin_pool is set to pool_name See bug #1220286 for more info. 
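# A minimal standalone sketch of the arithmetic exercised by
# test_thin_pool_free_space and test_get_thin_pool_free_space_retry above:
# `lvs -o size,data_percent --separator :` reports the pool size and the
# percentage of data already allocated, and the free space is
# size * (1 - data_percent / 100).  The parsing scheme here is assumed from
# the fake_execute stub, not copied from the real os_brick.local_dev.lvm code.
def _thin_pool_free_space_sketch(lvs_line: str) -> float:
    size, data_percent = (float(field)
                          for field in lvs_line.strip().split(':'))
    return round(size * (1 - data_percent / 100), 2)

assert _thin_pool_free_space_sketch(' 9:12\n') == 7.92
assert _thin_pool_free_space_sketch(' 15.84:50\n') == 7.92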
""" vg_name = "vg-name" pool_name = vg_name + "-pool" pool_path = "%s/%s" % (vg_name, pool_name) def executor(obj, *cmd, **kwargs): self.assertEqual(pool_path, cmd[-1]) self.vg._executor = executor self.vg.create_thin_pool(pool_name) self.vg.create_volume("test", "1G", lv_type='thin') self.assertEqual(pool_name, self.vg.vg_thin_pool) def test_lv_has_snapshot(self): self.assertTrue(self.vg.lv_has_snapshot('fake-vg')) self.assertFalse(self.vg.lv_has_snapshot('test-volumes')) def test_activate_lv(self): self.vg._supports_lvchange_ignoreskipactivation = True with mock.patch.object(self.vg, '_execute') as mock_exec: self.vg.activate_lv('my-lv') expected = [mock.call('lvchange', '-a', 'y', '--yes', '-K', 'fake-vg/my-lv', root_helper='sudo', run_as_root=True)] self.assertEqual(expected, mock_exec.call_args_list) def test_get_mirrored_available_capacity(self): self.assertEqual(2.0, self.vg.vg_mirror_free_space(1)) def test_lv_extend(self): self.vg.deactivate_lv = mock.MagicMock() # Extend lv with snapshot and make sure deactivate called self.vg.create_volume("test", "1G") self.vg.extend_volume("test", "2G") self.vg.deactivate_lv.assert_called_once_with('test') self.vg.deactivate_lv.reset_mock() # Extend lv without snapshot so deactivate should not be called self.vg.create_volume("test", "1G") self.vg.vg_name = "test-volumes" self.vg.extend_volume("test", "2G") self.assertFalse(self.vg.deactivate_lv.called) def test_lv_deactivate(self): with mock.patch.object(self.vg, '_execute', return_value=(0, 0)): is_active_mock = mock.Mock() is_active_mock.return_value = False self.vg._lv_is_active = is_active_mock self.vg.create_volume('test', '1G') self.vg.deactivate_lv('test') @mock.patch('os_brick.utils._time_sleep') def test_lv_deactivate_timeout(self, mock_sleep): with mock.patch.object(self.vg, '_execute', return_value=(0, 0)): is_active_mock = mock.Mock() is_active_mock.return_value = True self.vg._lv_is_active = is_active_mock self.vg.create_volume('test', '1G') self.assertRaises(exception.VolumeNotDeactivated, self.vg.deactivate_lv, 'test') def test_lv_is_active(self): self.vg.create_volume('test', '1G') with mock.patch.object(self.vg, '_execute', return_value=['owi-a---', '']): self.assertTrue(self.vg._lv_is_active('test')) with mock.patch.object(self.vg, '_execute', return_value=['owi-----', '']): self.assertFalse(self.vg._lv_is_active('test')) class BrickLvmTestCaseIgnoreFDWarnings(BrickLvmTestCase): def setUp(self): self.configuration = mock.Mock() self.configuration.lvm_suppress_fd_warnings = True super(BrickLvmTestCaseIgnoreFDWarnings, self).setUp() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.627759 os_brick-6.11.0/os_brick/tests/privileged/0000775000175000017500000000000000000000000020504 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/privileged/__init__.py0000664000175000017500000000000000000000000022603 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/privileged/test_nvmeof.py0000664000175000017500000002443600000000000023420 0ustar00zuulzuul00000000000000# Copyright (c) 2021, Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import builtins import errno from unittest import mock import ddt from oslo_concurrency import processutils as putils import os_brick.privileged as privsep_brick import os_brick.privileged.nvmeof as privsep_nvme from os_brick.privileged import rootwrap from os_brick.tests import base @ddt.ddt class PrivNVMeTestCase(base.TestCase): def setUp(self): super(PrivNVMeTestCase, self).setUp() # Disable privsep server/client mode privsep_brick.default.set_client_mode(False) self.addCleanup(privsep_brick.default.set_client_mode, True) @mock.patch('os.chmod') @mock.patch.object(builtins, 'open', new_callable=mock.mock_open) @mock.patch('os.makedirs') @mock.patch.object(rootwrap, 'custom_execute') def test_create_hostnqn(self, mock_exec, mock_mkdirs, mock_open, mock_chmod): hostnqn = mock.Mock() mock_exec.return_value = (hostnqn, mock.sentinel.err) res = privsep_nvme.create_hostnqn() mock_mkdirs.assert_called_once_with('/etc/nvme', mode=0o755, exist_ok=True) mock_exec.assert_called_once_with('nvme', 'show-hostnqn') mock_open.assert_called_once_with('/etc/nvme/hostnqn', 'w') stripped_hostnqn = hostnqn.strip.return_value mock_open().write.assert_called_once_with(stripped_hostnqn) mock_chmod.assert_called_once_with('/etc/nvme/hostnqn', 0o644) self.assertEqual(stripped_hostnqn, res) @mock.patch('os.chmod') @mock.patch.object(builtins, 'open', new_callable=mock.mock_open) @mock.patch('os.makedirs') @mock.patch.object(rootwrap, 'custom_execute') def test_create_hostnqn_from_system_uuid(self, mock_exec, mock_mkdirs, mock_open, mock_chmod): system_uuid = 'ea841a98-444c-4abb-bd99-092b20518542' hostnqn = 'nqn.2014-08.org.nvmexpress:uuid:' + system_uuid res = privsep_nvme.create_hostnqn(system_uuid) mock_mkdirs.assert_called_once_with('/etc/nvme', mode=0o755, exist_ok=True) mock_exec.assert_not_called() mock_open.assert_called_once_with('/etc/nvme/hostnqn', 'w') mock_open().write.assert_called_once_with(hostnqn) mock_chmod.assert_called_once_with('/etc/nvme/hostnqn', 0o644) self.assertEqual(hostnqn, res) @mock.patch('os.chmod') @mock.patch.object(builtins, 'open', new_callable=mock.mock_open) @mock.patch('os.makedirs') @mock.patch.object(rootwrap, 'custom_execute') def test_create_hostnqn_generate(self, mock_exec, mock_mkdirs, mock_open, mock_chmod): hostnqn = mock.Mock() mock_exec.side_effect = [ putils.ProcessExecutionError(exit_code=errno.ENOENT, stdout="totally exist sub-command", stderr=''), (hostnqn, mock.sentinel.err) ] res = privsep_nvme.create_hostnqn() mock_mkdirs.assert_called_once_with('/etc/nvme', mode=0o755, exist_ok=True) self.assertEqual(2, mock_exec.call_count) mock_exec.assert_has_calls([mock.call('nvme', 'show-hostnqn'), mock.call('nvme', 'gen-hostnqn')]) mock_open.assert_called_once_with('/etc/nvme/hostnqn', 'w') stripped_hostnqn = hostnqn.strip.return_value mock_open().write.assert_called_once_with(stripped_hostnqn) mock_chmod.assert_called_once_with('/etc/nvme/hostnqn', 0o644) self.assertEqual(stripped_hostnqn, res) @ddt.data((231, 'error: Invalid sub-command\n', ''), (254, '', 'hostnqn is not available -- use nvme gen-hostnqn\n')) @ddt.unpack @mock.patch('os.chmod') 
@mock.patch.object(builtins, 'open', new_callable=mock.mock_open) @mock.patch('os.makedirs') @mock.patch.object(rootwrap, 'custom_execute') def test_create_hostnqn_generate_old_nvme_cli( self, exit_code, stdout, stderr, mock_exec, mock_mkdirs, mock_open, mock_chmod): hostnqn = mock.Mock() mock_exec.side_effect = [ putils.ProcessExecutionError( exit_code=exit_code, stdout=stdout, stderr=stderr), (hostnqn, mock.sentinel.err) ] res = privsep_nvme.create_hostnqn() mock_mkdirs.assert_called_once_with('/etc/nvme', mode=0o755, exist_ok=True) self.assertEqual(2, mock_exec.call_count) mock_exec.assert_has_calls([mock.call('nvme', 'show-hostnqn'), mock.call('nvme', 'gen-hostnqn')]) mock_open.assert_called_once_with('/etc/nvme/hostnqn', 'w') stripped_hostnqn = hostnqn.strip.return_value mock_open().write.assert_called_once_with(stripped_hostnqn) mock_chmod.assert_called_once_with('/etc/nvme/hostnqn', 0o644) self.assertEqual(stripped_hostnqn, res) @ddt.data(OSError(errno.ENOENT), # nvme not present in system putils.ProcessExecutionError(exit_code=123)) # nvme error @mock.patch('os.makedirs') @mock.patch.object(rootwrap, 'custom_execute') def test_create_hostnqn_nvme_not_present(self, exception, mock_exec, mock_mkdirs): mock_exec.side_effect = exception res = privsep_nvme.create_hostnqn() mock_mkdirs.assert_called_once_with('/etc/nvme', mode=0o755, exist_ok=True) mock_exec.assert_called_once_with('nvme', 'show-hostnqn') self.assertEqual('', res) @mock.patch('os.chmod') @mock.patch.object(builtins, 'open', new_callable=mock.mock_open) @mock.patch('os.makedirs') def test_create_hostid(self, mock_mkdirs, mock_open, mock_chmod): res = privsep_nvme.create_hostid('uuid') mock_mkdirs.assert_called_once_with('/etc/nvme', mode=0o755, exist_ok=True) mock_open.assert_called_once_with('/etc/nvme/hostid', 'w') mock_open().write.assert_called_once_with('uuid\n') mock_chmod.assert_called_once_with('/etc/nvme/hostid', 0o644) self.assertEqual('uuid', res) @mock.patch('os.chmod') @mock.patch.object(builtins, 'open', new_callable=mock.mock_open) @mock.patch('os.makedirs') def test_create_hostid_fails(self, mock_mkdirs, mock_open, mock_chmod): mock_mkdirs.side_effect = OSError res = privsep_nvme.create_hostid(None) mock_mkdirs.assert_called_once_with('/etc/nvme', mode=0o755, exist_ok=True) mock_open.assert_not_called() mock_chmod.assert_not_called() self.assertIsNone(res) @mock.patch.object(builtins, 'open', new_callable=mock.mock_open) def test_get_system_uuid_product_uuid(self, mock_open): uuid = 'dbc6ba60-36ae-4b96-9310-628832bdfc3d' mock_fd = mock_open.return_value.__enter__.return_value mock_fd.read.return_value = uuid res = privsep_nvme.get_system_uuid() self.assertEqual(uuid, res) mock_open.assert_called_once_with('/sys/class/dmi/id/product_uuid', 'r') mock_fd.read.assert_called_once_with() @mock.patch.object(builtins, 'open', side_effect=Exception) @mock.patch.object(rootwrap, 'custom_execute') def test_get_system_uuid_dmidecode(self, mock_exec, mock_open): uuid = 'dbc6ba60-36ae-4b96-9310-628832bdfc3d' mock_exec.return_value = (f' {uuid} ', '') res = privsep_nvme.get_system_uuid() self.assertEqual(uuid, res) mock_open.assert_called_once_with('/sys/class/dmi/id/product_uuid', 'r') mock_exec.assert_called_once_with('dmidecode', '-ssystem-uuid') @mock.patch.object(builtins, 'open', side_effect=Exception) @mock.patch.object(rootwrap, 'custom_execute', return_value=('', '')) def test_get_system_uuid_dmidecode_empty(self, mock_exec, mock_open): res = privsep_nvme.get_system_uuid() self.assertEqual('', res) 
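        # A minimal standalone sketch of the lookup order these
        # get_system_uuid tests pin down: try the sysfs DMI file first, fall
        # back to `dmidecode -ssystem-uuid`, and return '' when both fail.
        # This illustrates the tested contract only; it is not the actual
        # os_brick.privileged.nvmeof implementation.
        import subprocess

        def _get_system_uuid_sketch() -> str:
            try:
                with open('/sys/class/dmi/id/product_uuid', 'r') as f:
                    return f.read().strip()
            except Exception:
                pass  # unreadable on some platforms; fall back to dmidecode
            try:
                out = subprocess.run(['dmidecode', '-ssystem-uuid'],
                                     capture_output=True, text=True,
                                     check=True).stdout
            except (OSError, subprocess.CalledProcessError):
                return ''
            return out.strip()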
mock_open.assert_called_once_with('/sys/class/dmi/id/product_uuid', 'r') mock_exec.assert_called_once_with('dmidecode', '-ssystem-uuid') @mock.patch.object(builtins, 'open', side_effect=Exception) @mock.patch.object(rootwrap, 'custom_execute', side_effect=putils.ProcessExecutionError) def test_get_system_uuid_failure(self, mock_exec, mock_open): res = privsep_nvme.get_system_uuid() self.assertEqual('', res) mock_open.assert_called_once_with('/sys/class/dmi/id/product_uuid', 'r') mock_exec.assert_called_once_with('dmidecode', '-ssystem-uuid') @mock.patch.object(builtins, 'open', side_effect=Exception) @mock.patch.object(rootwrap, 'custom_execute', side_effect=FileNotFoundError) def test_get_system_uuid_dmidecode_missing(self, mock_exec, mock_open): res = privsep_nvme.get_system_uuid() self.assertEqual('', res) mock_open.assert_called_once_with('/sys/class/dmi/id/product_uuid', 'r') mock_exec.assert_called_once_with('dmidecode', '-ssystem-uuid') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/privileged/test_rbd.py0000664000175000017500000000671000000000000022670 0ustar00zuulzuul00000000000000# Copyright (c) 2020, Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
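# The test__get_rbd_class test below verifies a lazy, self-replacing loader:
# the first call imports RBDConnector (deferring a costly import until it is
# actually needed) and then rebinds the public name so that later calls are
# no-ops.  A minimal standalone sketch of that pattern with hypothetical
# names, not the actual os_brick.privileged.rbd code:
import importlib

RBD_CONNECTOR_SKETCH = None

def _get_rbd_class_sketch():
    """First call: import and cache the class, then become a no-op."""
    global RBD_CONNECTOR_SKETCH, get_rbd_class_sketch
    RBD_CONNECTOR_SKETCH = importlib.import_module(
        'os_brick.initiator.connectors.rbd').RBDConnector
    get_rbd_class_sketch = lambda: None

get_rbd_class_sketch = _get_rbd_class_sketch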
from unittest import mock import os_brick.privileged as privsep_brick import os_brick.privileged.rbd as privsep_rbd from os_brick.tests import base class PrivRBDTestCase(base.TestCase): def setUp(self): super(PrivRBDTestCase, self).setUp() # Disable privsep server/client mode privsep_brick.default.set_client_mode(False) self.addCleanup(privsep_brick.default.set_client_mode, True) @mock.patch('oslo_utils.importutils.import_class') def test__get_rbd_class(self, mock_import): self.assertIsNone(privsep_rbd.RBDConnector) self.assertIs(privsep_rbd._get_rbd_class, privsep_rbd.get_rbd_class) self.addCleanup(setattr, privsep_rbd, 'RBDConnector', None) self.addCleanup(setattr, privsep_rbd, 'get_rbd_class', privsep_rbd._get_rbd_class) privsep_rbd._get_rbd_class() mock_import.assert_called_once_with( 'os_brick.initiator.connectors.rbd.RBDConnector') self.assertEqual(mock_import.return_value, privsep_rbd.RBDConnector) self.assertIsNot(privsep_rbd._get_rbd_class, privsep_rbd.get_rbd_class) @mock.patch.object(privsep_rbd, 'get_rbd_class') @mock.patch('oslo_utils.fileutils.delete_if_exists') def test_delete_if_exists(self, mock_delete, mock_get_class): res = privsep_rbd.delete_if_exists(mock.sentinel.path) mock_get_class.assert_not_called() mock_delete.assert_called_once_with(mock.sentinel.path) self.assertIs(mock_delete.return_value, res) @mock.patch.object(privsep_rbd, 'get_rbd_class') @mock.patch.object(privsep_rbd, 'RBDConnector') def test_root_create_ceph_conf(self, mock_connector, mock_get_class): s = mock.sentinel res = privsep_rbd.root_create_ceph_conf(s.monitor_ips, s.monitor_ports, s.cluster_name, s.user, s.keyring) mock_get_class.assert_called_once_with() mock_connector._create_ceph_conf.assert_called_once_with( s.monitor_ips, s.monitor_ports, s.cluster_name, s.user, s.keyring) self.assertIs(mock_connector._create_ceph_conf.return_value, res) @mock.patch.object(privsep_rbd, 'get_rbd_class') @mock.patch.object(privsep_rbd, 'open') @mock.patch.object(privsep_rbd, 'RBDConnector') def test_check_valid_path(self, mock_connector, mock_open, mock_get_class): res = privsep_rbd.check_valid_path(mock.sentinel.path) mock_get_class.assert_called_once_with() mock_open.assert_called_once_with(mock.sentinel.path, 'rb') mock_connector._check_valid_device.assert_called_once_with( mock_open.return_value.__enter__.return_value) self.assertEqual(mock_connector._check_valid_device.return_value, res) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/privileged/test_rootwrap.py0000664000175000017500000002201200000000000023767 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
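# The execute tests below pin down a dispatch contract: run_as_root and
# root_helper are consumed by the wrapper and never forwarded to the
# privileged entry point, and the privileged call itself must reject
# shell=True or run_as_root=True.  A standalone sketch of that argument
# scrubbing (hypothetical helper names; in os-brick, privsep rather than a
# sudo-style helper provides the real privilege boundary):
def _execute_root_sketch(*cmd, **kwargs):
    if 'shell' in kwargs or 'run_as_root' in kwargs:
        raise TypeError('argument not allowed on the privileged path')
    return ('', '')  # placeholder for the real command result

def _execute_local_sketch(*cmd, **kwargs):
    return ('', '')  # placeholder for the real command result

def _execute_sketch(*cmd, run_as_root=False, root_helper=None, **kwargs):
    # root_helper is accepted for API compatibility but deliberately not
    # forwarded: the privilege escalation happens in the privsep channel.
    if run_as_root:
        return _execute_root_sketch(*cmd, **kwargs)
    return _execute_local_sketch(*cmd, **kwargs)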
from unittest import mock import ddt from oslo_concurrency import processutils as putils from os_brick import exception from os_brick import privileged from os_brick.privileged import rootwrap as priv_rootwrap from os_brick.tests import base @ddt.ddt class PrivRootwrapTestCase(base.TestCase): def setUp(self): super(PrivRootwrapTestCase, self).setUp() # Bypass privsep and run these simple functions in-process # (allows reading back the modified state of mocks) privileged.default.set_client_mode(False) self.addCleanup(privileged.default.set_client_mode, True) @mock.patch('os_brick.privileged.rootwrap.execute_root') @mock.patch('oslo_concurrency.processutils.execute') def test_execute(self, mock_putils_exec, mock_exec_root): priv_rootwrap.execute('echo', 'foo', run_as_root=False) self.assertFalse(mock_exec_root.called) priv_rootwrap.execute('echo', 'foo', run_as_root=True, root_helper='baz', check_exit_code=0) mock_exec_root.assert_called_once_with( 'echo', 'foo', check_exit_code=0) @mock.patch('oslo_concurrency.processutils.execute') def test_execute_root(self, mock_putils_exec): priv_rootwrap.execute_root('echo', 'foo', check_exit_code=0) mock_putils_exec.assert_called_once_with( 'echo', 'foo', check_exit_code=0, shell=False, run_as_root=False, delay_on_retry=False, on_completion=mock.ANY, on_execute=mock.ANY) # Exact exception isn't particularly important, but these # should be errors: self.assertRaises(TypeError, priv_rootwrap.execute_root, 'foo', shell=True) self.assertRaises(TypeError, priv_rootwrap.execute_root, 'foo', run_as_root=True) @mock.patch('oslo_concurrency.processutils.execute', side_effect=OSError(42, 'mock error')) def test_oserror_raise(self, mock_putils_exec): self.assertRaises(putils.ProcessExecutionError, priv_rootwrap.execute, 'foo') @mock.patch.object(priv_rootwrap.execute_root.privsep_entrypoint, 'client_mode', False) @mock.patch.object(priv_rootwrap, 'custom_execute') def test_execute_as_root(self, exec_mock): res = priv_rootwrap.execute(mock.sentinel.cmds, run_as_root=True, root_helper=mock.sentinel.root_helper, keyword_arg=mock.sentinel.kwarg) self.assertEqual(exec_mock.return_value, res) exec_mock.assert_called_once_with(mock.sentinel.cmds, shell=False, run_as_root=False, keyword_arg=mock.sentinel.kwarg) @mock.patch('threading.Timer') def test_custom_execute_default_timeout(self, mock_timer): """Confirm timeout defaults to 600 and the thread timer is started.""" priv_rootwrap.custom_execute('echo', 'hola') mock_timer.assert_called_once_with(600, mock.ANY, mock.ANY) mock_timer.return_value.start.assert_called_once_with() def test_custom_execute_callbacks(self): """Confirm execute callbacks are called on execute.""" on_execute = mock.Mock() on_completion = mock.Mock() msg = 'hola' out, err = priv_rootwrap.custom_execute('echo', msg, on_execute=on_execute, on_completion=on_completion) self.assertEqual(msg + '\n', out) self.assertEqual('', err) on_execute.assert_called_once_with(mock.ANY) proc = on_execute.call_args[0][0] on_completion.assert_called_once_with(proc) @mock.patch('os_brick.utils._time_sleep') def test_custom_execute_timeout_raises_with_retries(self, sleep_mock): on_execute = mock.Mock() on_completion = mock.Mock() self.assertRaises(exception.ExecutionTimeout, priv_rootwrap.custom_execute, 'sleep', '2', timeout=0.05, raise_timeout=True, interval=2, backoff_rate=3, attempts=3, on_execute=on_execute, on_completion=on_completion) sleep_mock.assert_has_calls([mock.call(0), mock.call(6), mock.call(0), mock.call(18), mock.call(0)]) expected_calls = 
[mock.call(args[0][0]) for args in on_execute.call_args_list] on_execute.assert_has_calls(expected_calls) on_completion.assert_has_calls(expected_calls) def test_custom_execute_timeout_no_raise(self): out, err = priv_rootwrap.custom_execute('sleep', '2', timeout=0.05, raise_timeout=False) self.assertEqual('', out) self.assertIsInstance(err, str) def test_custom_execute_check_exit_code(self): self.assertRaises(putils.ProcessExecutionError, priv_rootwrap.custom_execute, 'ls', '-y', check_exit_code=True) def test_custom_execute_no_check_exit_code(self): out, err = priv_rootwrap.custom_execute('ls', '-y', check_exit_code=False) self.assertEqual('', out) self.assertIsInstance(err, str) @mock.patch.object(priv_rootwrap.unlink_root.privsep_entrypoint, 'client_mode', False) @mock.patch('os.unlink', side_effect=IOError) def test_unlink_root(self, unlink_mock): links = ['/dev/disk/by-id/link1', '/dev/disk/by-id/link2'] priv_rootwrap.unlink_root(*links, no_errors=True) unlink_mock.assert_has_calls([mock.call(links[0]), mock.call(links[1])]) @mock.patch.object(priv_rootwrap.unlink_root.privsep_entrypoint, 'client_mode', False) @mock.patch('os.unlink', side_effect=IOError) def test_unlink_root_raise(self, unlink_mock): links = ['/dev/disk/by-id/link1', '/dev/disk/by-id/link2'] self.assertRaises(IOError, priv_rootwrap.unlink_root, *links, no_errors=False) unlink_mock.assert_called_once_with(links[0]) @mock.patch.object(priv_rootwrap.unlink_root.privsep_entrypoint, 'client_mode', False) @mock.patch('os.unlink', side_effect=IOError) def test_unlink_root_raise_at_end(self, unlink_mock): links = ['/dev/disk/by-id/link1', '/dev/disk/by-id/link2'] self.assertRaises(exception.ExceptionChainer, priv_rootwrap.unlink_root, *links, raise_at_end=True) unlink_mock.assert_has_calls([mock.call(links[0]), mock.call(links[1])]) @mock.patch.object(priv_rootwrap.unlink_root.privsep_entrypoint, 'client_mode', False) @mock.patch('os.symlink') @mock.patch('os.remove') def test_link_root_no_force(self, mock_remove, mock_link): priv_rootwrap.link_root(mock.sentinel.target, mock.sentinel.link_name, force=False) mock_remove.assert_not_called() mock_link.assert_called_once_with(mock.sentinel.target, mock.sentinel.link_name) @ddt.data(None, FileNotFoundError) @mock.patch.object(priv_rootwrap.unlink_root.privsep_entrypoint, 'client_mode', False) @mock.patch('os.symlink') @mock.patch('os.remove') def test_link_root_force(self, remove_effect, mock_remove, mock_link): mock_remove.side_effect = remove_effect priv_rootwrap.link_root(mock.sentinel.target, mock.sentinel.link_name) mock_remove.assert_called_once_with(mock.sentinel.link_name) mock_link.assert_called_once_with(mock.sentinel.target, mock.sentinel.link_name) @mock.patch.object(priv_rootwrap.unlink_root.privsep_entrypoint, 'client_mode', False) @mock.patch('os.symlink') @mock.patch('os.remove', side_effect=IndexError) # Non not found error def test_link_root_force_fail(self, mock_remove, mock_link): self.assertRaises(IndexError, priv_rootwrap.link_root, mock.sentinel.target, mock.sentinel.link_name) mock_remove.assert_called_once_with(mock.sentinel.link_name) mock_link.assert_not_called() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.627759 os_brick-6.11.0/os_brick/tests/remotefs/0000775000175000017500000000000000000000000020176 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 
os_brick-6.11.0/os_brick/tests/remotefs/__init__.py0000664000175000017500000000000000000000000022275 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/remotefs/test_remotefs.py0000664000175000017500000002711000000000000023434 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import tempfile from unittest import mock from oslo_concurrency import processutils as putils from os_brick import exception from os_brick.privileged import rootwrap as priv_rootwrap from os_brick.remotefs import remotefs from os_brick.tests import base class RemoteFsClientTestCase(base.TestCase): def setUp(self): super(RemoteFsClientTestCase, self).setUp() self.mock_execute = self.mock_object(priv_rootwrap, 'execute', return_value=None) @mock.patch.object(remotefs.RemoteFsClient, '_read_mounts', return_value=[]) def test_cifs(self, mock_read_mounts): client = remotefs.RemoteFsClient("cifs", root_helper='true', smbfs_mount_point_base='/mnt') share = '10.0.0.1:/qwe' mount_point = client.get_mount_point(share) client.mount(share) calls = [mock.call('mkdir', '-p', mount_point, check_exit_code=0), mock.call('mount', '-t', 'cifs', share, mount_point, run_as_root=True, root_helper='true', check_exit_code=0)] self.mock_execute.assert_has_calls(calls) @mock.patch.object(remotefs.RemoteFsClient, '_read_mounts', return_value=[]) def test_nfs(self, mock_read_mounts): client = remotefs.RemoteFsClient("nfs", root_helper='true', nfs_mount_point_base='/mnt') share = '10.0.0.1:/qwe' mount_point = client.get_mount_point(share) client.mount(share) calls = [mock.call('mkdir', '-p', mount_point, check_exit_code=0), mock.call('mount', '-t', 'nfs', '-o', 'vers=4,minorversion=1', share, mount_point, check_exit_code=0, run_as_root=True, root_helper='true')] self.mock_execute.assert_has_calls(calls) def test_read_mounts(self): mounts = """device1 mnt_point1 ext4 rw,seclabel,relatime 0 0 device2 mnt_point2 ext4 rw,seclabel,relatime 0 0""" with mock.patch('os_brick.remotefs.remotefs.open', mock.mock_open(read_data=mounts)) as mock_open: client = remotefs.RemoteFsClient("cifs", root_helper='true', smbfs_mount_point_base='/mnt') ret = client._read_mounts() mock_open.assert_called_once_with('/proc/mounts', 'r') self.assertEqual(ret, {'mnt_point1': 'device1', 'mnt_point2': 'device2'}) @mock.patch.object(priv_rootwrap, 'execute') @mock.patch.object(remotefs.RemoteFsClient, '_do_mount') def test_mount_already_mounted(self, mock_do_mount, mock_execute): share = "10.0.0.1:/share" client = remotefs.RemoteFsClient("cifs", root_helper='true', smbfs_mount_point_base='/mnt') mounts = {client.get_mount_point(share): 'some_dev'} with mock.patch.object(client, '_read_mounts', return_value=mounts): client.mount(share) self.assertEqual(mock_do_mount.call_count, 0) self.assertEqual(mock_execute.call_count, 0) @mock.patch.object(priv_rootwrap, 'execute') def test_mount_race(self, mock_execute): err_msg = 'mount.nfs: /var/asdf is 
already mounted'
        mock_execute.side_effect = putils.ProcessExecutionError(
            stderr=err_msg)
        mounts = {'192.0.2.20:/share': '/var/asdf/'}
        client = remotefs.RemoteFsClient("nfs", root_helper='true',
                                         nfs_mount_point_base='/var/asdf')
        with mock.patch.object(client, '_read_mounts',
                               return_value=mounts):
            client._do_mount('nfs', '192.0.2.20:/share', '/var/asdf')

    @mock.patch.object(priv_rootwrap, 'execute')
    def test_mount_failure(self, mock_execute):
        err_msg = 'mount.nfs: nfs broke'
        mock_execute.side_effect = putils.ProcessExecutionError(
            stderr=err_msg)
        client = remotefs.RemoteFsClient("nfs", root_helper='true',
                                         nfs_mount_point_base='/var/asdf')
        self.assertRaises(putils.ProcessExecutionError,
                          client._do_mount,
                          'nfs', '192.0.2.20:/share', '/var/asdf')

    def _test_no_mount_point(self, fs_type):
        self.assertRaises(exception.InvalidParameterValue,
                          remotefs.RemoteFsClient,
                          fs_type, root_helper='true')

    def test_no_mount_point_nfs(self):
        self._test_no_mount_point('nfs')

    def test_no_mount_point_cifs(self):
        self._test_no_mount_point('cifs')

    def test_no_mount_point_glusterfs(self):
        self._test_no_mount_point('glusterfs')

    def test_no_mount_point_vzstorage(self):
        self._test_no_mount_point('vzstorage')

    def test_no_mount_point_quobyte(self):
        self._test_no_mount_point('quobyte')

    def test_invalid_fs(self):
        self.assertRaises(exception.ProtocolNotSupported,
                          remotefs.RemoteFsClient,
                          'my_fs', root_helper='true')

    def test_init_sets_mount_base(self):
        client = remotefs.RemoteFsClient("cifs", root_helper='true',
                                         smbfs_mount_point_base='/fake',
                                         cifs_mount_point_base='/fake2')
        # Tests that although the FS type is "cifs", the config option
        # starts with "smbfs_"
        self.assertEqual('/fake', client._mount_base)

    @mock.patch(
        'os_brick.remotefs.remotefs.RemoteFsClient._check_nfs_options')
    def test_init_nfs_calls_check_nfs_options(self, mock_check_nfs_options):
        remotefs.RemoteFsClient("nfs", root_helper='true',
                                nfs_mount_point_base='/fake')
        mock_check_nfs_options.assert_called_once_with()


class VZStorageRemoteFSClientTestCase(RemoteFsClientTestCase):
    @mock.patch.object(remotefs.RemoteFsClient, '_read_mounts',
                       return_value=[])
    def test_vzstorage_by_cluster_name(self, mock_read_mounts):
        client = remotefs.VZStorageRemoteFSClient(
            "vzstorage", root_helper='true',
            vzstorage_mount_point_base='/mnt')
        share = 'qwe'
        cluster_name = share
        mount_point = client.get_mount_point(share)
        client.mount(share)
        calls = [mock.call('mkdir', '-p', mount_point, check_exit_code=0),
                 mock.call('pstorage-mount', '-c', cluster_name, mount_point,
                           root_helper='true', check_exit_code=0,
                           run_as_root=True)]
        self.mock_execute.assert_has_calls(calls)

    @mock.patch.object(remotefs.RemoteFsClient, '_read_mounts',
                       return_value=[])
    def test_vzstorage_with_auth(self, mock_read_mounts):
        client = remotefs.VZStorageRemoteFSClient(
            "vzstorage", root_helper='true',
            vzstorage_mount_point_base='/mnt')
        cluster_name = 'qwe'
        password = '123456'
        share = '%s:%s' % (cluster_name, password)
        mount_point = client.get_mount_point(share)
        client.mount(share)
        calls = [mock.call('mkdir', '-p', mount_point, check_exit_code=0),
                 mock.call('pstorage', '-c', cluster_name, 'auth-node', '-P',
                           process_input=password, root_helper='true',
                           run_as_root=True),
                 mock.call('pstorage-mount', '-c', cluster_name, mount_point,
                           root_helper='true', check_exit_code=0,
                           run_as_root=True)]
        self.mock_execute.assert_has_calls(calls)

    @mock.patch('os.path.exists', return_value=False)
    @mock.patch.object(remotefs.RemoteFsClient, '_read_mounts',
                       return_value=[])
    def test_vzstorage_with_mds_list(self, mock_read_mounts, mock_exists):
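        # Descriptive summary of this test: a 'mds1,mds2:/cluster' share is
        # split into the MDS address list and the cluster name; the client is
        # expected to write the addresses to a temporary bs_list file, copy
        # that directory into /etc/pstorage/clusters/<cluster> as root, and
        # only then run pstorage-mount.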
client = remotefs.VZStorageRemoteFSClient( "vzstorage", root_helper='true', vzstorage_mount_point_base='/mnt') cluster_name = 'qwe' mds_list = ['10.0.0.1', '10.0.0.2'] share = '%s:/%s' % (','.join(mds_list), cluster_name) mount_point = client.get_mount_point(share) vz_conf_dir = os.path.join('/etc/pstorage/clusters/', cluster_name) tmp_dir = '/tmp/fake_dir/' with mock.patch.object(tempfile, 'mkdtemp', return_value=tmp_dir): with mock.patch('os_brick.remotefs.remotefs.open', new_callable=mock.mock_open) as mock_open: client.mount(share) write_calls = [mock.call(tmp_dir + 'bs_list', 'w'), mock.call().__enter__(), mock.call().write('10.0.0.1\n'), mock.call().write('10.0.0.2\n'), mock.call().__exit__(None, None, None)] mock_open.assert_has_calls(write_calls) calls = [mock.call('mkdir', '-p', mount_point, check_exit_code=0), mock.call('cp', '-rf', tmp_dir, vz_conf_dir, run_as_root=True, root_helper='true'), mock.call('chown', '-R', 'root:root', vz_conf_dir, run_as_root=True, root_helper='true'), mock.call('pstorage-mount', '-c', cluster_name, mount_point, root_helper='true', check_exit_code=0, run_as_root=True)] self.mock_execute.assert_has_calls(calls) @mock.patch.object(remotefs.RemoteFsClient, '_read_mounts', return_value=[]) def test_vzstorage_invalid_share(self, mock_read_mounts): client = remotefs.VZStorageRemoteFSClient( "vzstorage", root_helper='true', vzstorage_mount_point_base='/mnt') self.assertRaises(exception.BrickException, client.mount, ':') class ScalityRemoteFsClientTestCase(base.TestCase): def test_no_mount_point_scality(self): self.assertRaises(exception.InvalidParameterValue, remotefs.ScalityRemoteFsClient, 'scality', root_helper='true') def test_get_mount_point(self): fsclient = remotefs.ScalityRemoteFsClient( 'scality', root_helper='true', scality_mount_point_base='/fake') self.assertEqual('/fake/path/00', fsclient.get_mount_point('path')) @mock.patch('oslo_concurrency.processutils.execute', return_value=None) @mock.patch('os_brick.remotefs.remotefs.RemoteFsClient._do_mount') def test_mount(self, mock_do_mount, mock_execute): fsclient = remotefs.ScalityRemoteFsClient( 'scality', root_helper='true', scality_mount_point_base='/fake', execute=putils.execute) with mock.patch.object(fsclient, '_read_mounts', return_value={}): fsclient.mount('fake') mock_execute.assert_called_once_with( 'mkdir', '-p', '/fake', check_exit_code=0) mock_do_mount.assert_called_once_with( 'sofs', '/etc/sfused.conf', '/fake') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/remotefs/test_windows_remotefs.py0000664000175000017500000001475000000000000025214 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
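# The RemoteFsClient tests above treat get_mount_point(share) as opaque: the
# mount point only needs to be a deterministic function of the share string
# under the configured mount base (upstream derives it from a hash of the
# share).  A standalone sketch of such a derivation (assumed scheme, shown
# here with an md5 digest):
import hashlib
import os.path

def _mount_point_sketch(mount_base: str, share: str) -> str:
    digest = hashlib.md5(share.encode('utf-8')).hexdigest()
    return os.path.join(mount_base, digest)

# e.g. _mount_point_sketch('/mnt', '10.0.0.1:/qwe') -> '/mnt/<32 hex chars>'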
from unittest import mock import ddt from os_brick import exception from os_brick.remotefs import windows_remotefs from os_brick.tests import base @ddt.ddt class WindowsRemotefsClientTestCase(base.TestCase): _FAKE_SHARE_NAME = 'fake_share' _FAKE_SHARE_SERVER = 'fake_share_server' _FAKE_SHARE = '\\\\%s\\%s' % (_FAKE_SHARE_SERVER, _FAKE_SHARE_NAME) @mock.patch.object(windows_remotefs, 'utilsfactory') def setUp(self, mock_utilsfactory): super(WindowsRemotefsClientTestCase, self).setUp() self._remotefs = windows_remotefs.WindowsRemoteFsClient( mount_type='smbfs') self._remotefs._mount_base = mock.sentinel.mount_base self._smbutils = self._remotefs._smbutils self._pathutils = self._remotefs._pathutils @ddt.data({'is_local_share': False}, {'expect_existing': False}) @ddt.unpack def test_get_local_share_path_missing(self, expect_existing=True, is_local_share=True): self._smbutils.get_smb_share_path.return_value = None self._smbutils.is_local_share.return_value = is_local_share if expect_existing: self.assertRaises( exception.VolumePathsNotFound, self._remotefs.get_local_share_path, self._FAKE_SHARE, expect_existing=expect_existing) else: share_path = self._remotefs.get_local_share_path( self._FAKE_SHARE, expect_existing=expect_existing) self.assertIsNone(share_path) self.assertEqual(is_local_share, self._smbutils.get_smb_share_path.called) self._smbutils.is_local_share.assert_called_once_with(self._FAKE_SHARE) @ddt.data({'share': '//addr/share_name/subdir_a/subdir_b', 'exp_path': r'C:\shared_dir\subdir_a\subdir_b'}, {'share': '//addr/share_name', 'exp_path': r'C:\shared_dir'}) @ddt.unpack @mock.patch('os.path.join', lambda *args: '\\'.join(args)) def test_get_local_share_path(self, share, exp_path): fake_local_path = 'C:\\shared_dir' self._smbutils.get_smb_share_path.return_value = fake_local_path share_path = self._remotefs.get_local_share_path(share) self.assertEqual(exp_path, share_path) self._smbutils.get_smb_share_path.assert_called_once_with( 'share_name') def test_get_share_name(self): resulted_name = self._remotefs.get_share_name(self._FAKE_SHARE) self.assertEqual(self._FAKE_SHARE_NAME, resulted_name) @ddt.data(True, False) @mock.patch.object(windows_remotefs.WindowsRemoteFsClient, '_create_mount_point') def test_mount(self, is_local_share, mock_create_mount_point): flags = '-o pass=password' self._remotefs._mount_options = '-o user=username,randomopt' self._remotefs._local_path_for_loopback = True self._smbutils.check_smb_mapping.return_value = False self._smbutils.is_local_share.return_value = is_local_share self._remotefs.mount(self._FAKE_SHARE, flags) if is_local_share: self.assertFalse(self._smbutils.check_smb_mapping.called) self.assertFalse(self._smbutils.mount_smb_share.called) else: self._smbutils.check_smb_mapping.assert_called_once_with( self._FAKE_SHARE) self._smbutils.mount_smb_share.assert_called_once_with( self._FAKE_SHARE, username='username', password='password') mock_create_mount_point.assert_called_once_with(self._FAKE_SHARE, is_local_share) def test_unmount(self): self._remotefs.unmount(self._FAKE_SHARE) self._smbutils.unmount_smb_share.assert_called_once_with( self._FAKE_SHARE) @ddt.data({'use_local_path': True}, {'path_exists': True, 'is_symlink': True}, {'path_exists': True}) @mock.patch.object(windows_remotefs.WindowsRemoteFsClient, 'get_local_share_path') @mock.patch.object(windows_remotefs.WindowsRemoteFsClient, 'get_mount_point') @mock.patch.object(windows_remotefs, 'os') @ddt.unpack def test_create_mount_point(self, mock_os, mock_get_mount_point, 
mock_get_local_share_path, path_exists=False, is_symlink=False, use_local_path=False): mock_os.path.exists.return_value = path_exists mock_os.isdir.return_value = False self._pathutils.is_symlink.return_value = is_symlink if path_exists and not is_symlink: self.assertRaises(exception.BrickException, self._remotefs._create_mount_point, self._FAKE_SHARE, use_local_path) else: self._remotefs._create_mount_point(self._FAKE_SHARE, use_local_path) mock_get_mount_point.assert_called_once_with(self._FAKE_SHARE) mock_os.path.isdir.assert_called_once_with(mock.sentinel.mount_base) if use_local_path: mock_get_local_share_path.assert_called_once_with( self._FAKE_SHARE) expected_symlink_target = mock_get_local_share_path.return_value else: expected_symlink_target = self._FAKE_SHARE.replace('/', '\\') if path_exists: self._pathutils.is_symlink.assert_called_once_with( mock_get_mount_point.return_value) else: self._pathutils.create_sym_link.assert_called_once_with( mock_get_mount_point.return_value, expected_symlink_target) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/test_brick.py0000664000175000017500000000135000000000000021054 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_os_brick ---------------------------------- Tests for `os_brick` module. """ from os_brick.tests import base class TestBrick(base.TestCase): def test_something(self): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/test_exception.py0000664000175000017500000000424000000000000021761 0ustar00zuulzuul00000000000000 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
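# The BrickException tests below rely on three behaviours of the base class:
# the class-level `message` template is %-formatted with the constructor
# kwargs, a template with a mis-spelled placeholder falls back to the raw
# template instead of raising, and a class-level `code` is injected into
# kwargs when the caller does not pass one.  A minimal standalone sketch of
# that contract (not the real os_brick.exception.BrickException):
class _BrickExceptionSketch(Exception):
    message = 'An unknown exception occurred.'
    code = 500

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
        self.kwargs.setdefault('code', self.code)
        if not message:
            try:
                message = self.message % self.kwargs
            except KeyError:
                message = self.message  # bad placeholders must not crash
        super().__init__(message)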
from os_brick import exception from os_brick.tests import base class BrickExceptionTestCase(base.TestCase): def test_default_error_msg(self): class FakeBrickException(exception.BrickException): message = "default message" exc = FakeBrickException() self.assertEqual(str(exc), 'default message') def test_error_msg(self): self.assertEqual(str(exception.BrickException('test')), 'test') def test_default_error_msg_with_kwargs(self): class FakeBrickException(exception.BrickException): message = "default message: %(code)s" exc = FakeBrickException(code=500) self.assertEqual(str(exc), 'default message: 500') def test_error_msg_exception_with_kwargs(self): class FakeBrickException(exception.BrickException): message = "default message: %(mispelled_code)s" exc = FakeBrickException(code=500) self.assertEqual(str(exc), 'default message: %(mispelled_code)s') def test_default_error_code(self): class FakeBrickException(exception.BrickException): code = 404 exc = FakeBrickException() self.assertEqual(exc.kwargs['code'], 404) def test_error_code_from_kwarg(self): class FakeBrickException(exception.BrickException): code = 500 exc = FakeBrickException(code=404) self.assertEqual(exc.kwargs['code'], 404) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/test_executor.py0000664000175000017500000001477700000000000021641 0ustar00zuulzuul00000000000000# encoding=utf8 # (c) Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
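# The Executor tests below check that command output is normalised to text:
# bytes stdout/stderr payloads (e.g. 'España', 'Zürich') come back as str
# both on success and inside a re-raised ProcessExecutionError.  A minimal
# sketch of that normalisation step (assumed approach, not the exact
# os_brick.executor code):
from oslo_utils import encodeutils

def _sanitize_output_sketch(stdout, stderr):
    return (encodeutils.safe_decode(stdout, errors='replace'),
            encodeutils.safe_decode(stderr, errors='replace'))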
import threading from unittest import mock from oslo_concurrency import processutils as putils from oslo_context import context as context_utils from os_brick import executor as brick_executor from os_brick.privileged import rootwrap from os_brick.tests import base class TestExecutor(base.TestCase): def test_default_execute(self): executor = brick_executor.Executor(root_helper=None) self.assertEqual(rootwrap.execute, executor._Executor__execute) def test_none_execute(self): executor = brick_executor.Executor(root_helper=None, execute=None) self.assertEqual(rootwrap.execute, executor._Executor__execute) def test_fake_execute(self): mock_execute = mock.Mock() executor = brick_executor.Executor(root_helper=None, execute=mock_execute) self.assertEqual(mock_execute, executor._Executor__execute) @mock.patch('sys.stdin', encoding='UTF-8') @mock.patch('os_brick.executor.priv_rootwrap.execute') def test_execute_non_safe_str_exception(self, execute_mock, stdin_mock): execute_mock.side_effect = putils.ProcessExecutionError( stdout='España', stderr='Zürich') executor = brick_executor.Executor(root_helper=None) exc = self.assertRaises(putils.ProcessExecutionError, executor._execute) self.assertEqual('Espa\xf1a', exc.stdout) self.assertEqual('Z\xfcrich', exc.stderr) @mock.patch('sys.stdin', encoding='UTF-8') @mock.patch('os_brick.executor.priv_rootwrap.execute') def test_execute_non_safe_str(self, execute_mock, stdin_mock): execute_mock.return_value = ('España', 'Zürich') executor = brick_executor.Executor(root_helper=None) stdout, stderr = executor._execute() self.assertEqual('Espa\xf1a', stdout) self.assertEqual('Z\xfcrich', stderr) @mock.patch('sys.stdin', encoding='UTF-8') @mock.patch('os_brick.executor.priv_rootwrap.execute') def test_execute_non_safe_bytes_exception(self, execute_mock, stdin_mock): execute_mock.side_effect = putils.ProcessExecutionError( stdout=bytes('España', 'utf-8'), stderr=bytes('Zürich', 'utf-8')) executor = brick_executor.Executor(root_helper=None) exc = self.assertRaises(putils.ProcessExecutionError, executor._execute) self.assertEqual('Espa\xf1a', exc.stdout) self.assertEqual('Z\xfcrich', exc.stderr) @mock.patch('sys.stdin', encoding='UTF-8') @mock.patch('os_brick.executor.priv_rootwrap.execute') def test_execute_non_safe_bytes(self, execute_mock, stdin_mock): execute_mock.return_value = (bytes('España', 'utf-8'), bytes('Zürich', 'utf-8')) executor = brick_executor.Executor(root_helper=None) stdout, stderr = executor._execute() self.assertEqual('Espa\xf1a', stdout) self.assertEqual('Z\xfcrich', stderr) class TestThread(base.TestCase): def _store_context(self, result): """Stores current thread's context in result list.""" result.append(context_utils.get_current()) def _run_threads(self, threads): for thread in threads: thread.start() for thread in threads: thread.join() def _do_test(self, thread_class, expected, result=None): if result is None: result = [] threads = [thread_class(target=self._store_context, args=[result]) for i in range(3)] self._run_threads(threads) self.assertEqual([expected] * len(threads), result) def test_normal_thread(self): """Test normal threads don't inherit parent's context.""" context = context_utils.RequestContext() context.update_store() self._do_test(threading.Thread, None) def test_no_context(self, result=None): """Test when parent has no context.""" context_utils._request_store.context = None self._do_test(brick_executor.Thread, None, result) def test_with_context(self, result=None): """Test that our class actually inherits the context.""" 
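        # A standalone sketch of the pattern this test verifies (assumed
        # mechanics, not the real os_brick.executor.Thread): capture the
        # creator's oslo context at construction time and re-seed the
        # thread-local store inside run().
        #
        #     class ContextThread(threading.Thread):
        #         def __init__(self, *args, **kwargs):
        #             self._parent_ctx = context_utils.get_current()
        #             super().__init__(*args, **kwargs)
        #
        #         def run(self):
        #             if self._parent_ctx is not None:
        #                 self._parent_ctx.update_store()
        #             super().run()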
context = context_utils.RequestContext() context.update_store() self._do_test(brick_executor.Thread, context, result) def _run_test(self, test_method, test_args, result): """Run one of the normal tests and store the result. Meant to be run in a different thread, thus the need to store the result, because by the time the join call completes the test's stack is no longer available and the exception will have been lost. """ try: test_method(test_args) result.append(True) except Exception: result.append(False) raise def test_no_cross_mix(self): """Test there's no shared global context between threads.""" result = [] contexts = [[], [], []] threads = [threading.Thread(target=self._run_test, args=[self.test_with_context, contexts[0], result]), threading.Thread(target=self._run_test, args=[self.test_no_context, contexts[1], result]), threading.Thread(target=self._run_test, args=[self.test_with_context, contexts[2], result])] self._run_threads(threads) # Check that all tests run without raising an exception self.assertEqual([True, True, True], result) # Check that the context were not shared self.assertNotEqual(contexts[0], contexts[2]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/tests/test_utils.py0000664000175000017500000007277200000000000021142 0ustar00zuulzuul00000000000000# (c) Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
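# The TestRetryDecorator cases below include exit-code-gated retries: a
# ProcessExecutionError is retried only when its exit code is in the allowed
# set.  A standalone sketch of that predicate-driven loop (simplified and
# without sleeps; upstream builds the same idea on tenacity):
from oslo_concurrency import processutils as _putils_sketch

def _retry_on_exit_codes_sketch(func, allowed_codes, attempts=3):
    for attempt in range(attempts):
        try:
            return func()
        except _putils_sketch.ProcessExecutionError as exc:
            # non-retriable code, or out of attempts: re-raise immediately
            if exc.exit_code not in allowed_codes or attempt == attempts - 1:
                raise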
import builtins import functools import io import time from unittest import mock from castellan.common import objects as castellan_objects import ddt from os_brick import exception from os_brick.tests import base from os_brick import utils class WrongException(exception.BrickException): pass @ddt.ddt class TestUtils(base.TestCase): @ddt.data(('1024', 1024), ('junk', None), ('2048\n', 2048)) @ddt.unpack def test_get_device_size(self, cmd_out, expected): mock_execute = mock.Mock() mock_execute._execute.return_value = (cmd_out, None) device = '/dev/fake' ret_size = utils.get_device_size(mock_execute, device) self.assertEqual(expected, ret_size) mock_execute._execute.assert_called_once_with( 'blockdev', '--getsize64', device, run_as_root=True, root_helper=mock_execute._root_helper) @mock.patch.object(builtins, 'open') def test_get_nvme_host_id_file_available(self, mock_open): mock_open.return_value.__enter__.return_value.read.return_value = ( 'uuid\n') result = utils.get_nvme_host_id(mock.sentinel.uuid) mock_open.assert_called_once_with('/etc/nvme/hostid', 'r') self.assertEqual('uuid', result) @mock.patch.object(utils.priv_nvme, 'create_hostid') @mock.patch.object(builtins, 'open') def test_get_nvme_host_id_io_err(self, mock_open, mock_create): mock_create.return_value = mock.sentinel.uuid_return mock_open.side_effect = IOError() result = utils.get_nvme_host_id(mock.sentinel.uuid) mock_open.assert_called_once_with('/etc/nvme/hostid', 'r') mock_create.assert_called_once_with(mock.sentinel.uuid) self.assertEqual(mock.sentinel.uuid_return, result) @mock.patch('uuid.uuid4') @mock.patch.object(utils.priv_nvme, 'create_hostid') @mock.patch.object(builtins, 'open') def test_get_nvme_host_id_io_err_no_uuid(self, mock_open, mock_create, mock_uuid): mock_create.return_value = mock.sentinel.uuid_return mock_open.side_effect = IOError() result = utils.get_nvme_host_id(None) mock_open.assert_called_once_with('/etc/nvme/hostid', 'r') mock_create.assert_called_once_with(str(mock_uuid.return_value)) self.assertEqual(mock.sentinel.uuid_return, result) @mock.patch.object(utils.priv_nvme, 'create_hostid') @mock.patch.object(builtins, 'open') def test_get_nvme_host_id_err(self, mock_open, mock_create): mock_open.side_effect = Exception() result = utils.get_nvme_host_id(None) mock_open.assert_called_once_with('/etc/nvme/hostid', 'r') mock_create.assert_not_called() self.assertIsNone(result) @ddt.data(('fake_info', True), (None, False)) @ddt.unpack def test_check_valid_device(self, fake_info, expected): mock_execute = mock.Mock() mock_execute._execute.return_value = ('fake_out', fake_info) fake_path = '/dev/fake' is_valid = utils.check_valid_device(mock_execute, fake_path) self.assertEqual(expected, is_valid) mock_execute._execute.assert_called_once_with( 'dd', 'if=/dev/fake', 'of=/dev/null', 'count=1', run_as_root=True, root_helper=mock_execute._root_helper) def test_check_valid_device_error(self): mock_execute = mock.Mock() p_exception = utils.processutils.ProcessExecutionError mock_execute._execute.side_effect = p_exception fake_path = '/dev/fake' is_valid = utils.check_valid_device(mock_execute, fake_path) self.assertEqual(False, is_valid) mock_execute._execute.assert_called_once_with( 'dd', 'if=/dev/fake', 'of=/dev/null', 'count=1', run_as_root=True, root_helper=mock_execute._root_helper) @mock.patch('binascii.hexlify') @ddt.data( castellan_objects.passphrase.Passphrase(b'test-passphrase'), castellan_objects.symmetric_key.SymmetricKey('AES', mock.sentinel.bitlength, mock.sentinel.key), 
castellan_objects.opaque_data.OpaqueData(mock.sentinel.key), castellan_objects.private_key.PrivateKey('RSA', mock.sentinel.bitlength, mock.sentinel.key), castellan_objects.public_key.PublicKey('RSA', mock.sentinel.bitlength, mock.sentinel.key), castellan_objects.x_509.X509(mock.sentinel.key) ) def test_get_passphrase_from_secret(self, secret, mock_hexlify): """Test proper passphrase processing of different secret types.""" if secret.managed_type() == 'passphrase': passphrase = utils.get_passphrase_from_secret(secret) mock_hexlify.assert_not_called() self.assertEqual('test-passphrase', passphrase) else: hexlified_bytes = mock.MagicMock() hexlified_bytes.decode.return_value = mock.sentinel.passphrase mock_hexlify.return_value = hexlified_bytes passphrase = utils.get_passphrase_from_secret(secret) mock_hexlify.assert_called_once_with(mock.sentinel.key) self.assertEqual(mock.sentinel.passphrase, passphrase) class TestRetryDecorator(base.TestCase): def test_no_retry_required(self): self.counter = 0 with mock.patch.object(utils, '_time_sleep') as mock_sleep: @utils.retry(exception.VolumeDeviceNotFound, interval=2, retries=3, backoff_rate=2) def succeeds(): self.counter += 1 return 'success' ret = succeeds() self.assertFalse(mock_sleep.called) self.assertEqual('success', ret) self.assertEqual(1, self.counter) def test_retries_once(self): self.counter = 0 interval = 2 backoff_rate = 2 retries = 3 with mock.patch.object(utils, '_time_sleep') as mock_sleep: @utils.retry(exception.VolumeDeviceNotFound, interval, retries, backoff_rate) def fails_once(): self.counter += 1 if self.counter < 2: raise exception.VolumeDeviceNotFound(device='fake') else: return 'success' ret = fails_once() self.assertEqual('success', ret) self.assertEqual(2, self.counter) self.assertEqual(1, mock_sleep.call_count) mock_sleep.assert_called_with(interval) def test_limit_is_reached(self): self.counter = 0 retries = 3 interval = 2 backoff_rate = 4 with mock.patch.object(utils, '_time_sleep') as mock_sleep: @utils.retry(exception.VolumeDeviceNotFound, interval, retries, backoff_rate) def always_fails(): self.counter += 1 raise exception.VolumeDeviceNotFound(device='fake') self.assertRaises(exception.VolumeDeviceNotFound, always_fails) self.assertEqual(retries, self.counter) expected_sleep_arg = [] for i in range(retries): if i > 0: interval *= (backoff_rate ** (i - 1)) expected_sleep_arg.append(float(interval)) mock_sleep.assert_has_calls( list(map(mock.call, expected_sleep_arg))) def test_wrong_exception_no_retry(self): with mock.patch.object(utils, '_time_sleep') as mock_sleep: @utils.retry(exception.VolumeDeviceNotFound) def raise_unexpected_error(): raise WrongException("wrong exception") self.assertRaises(WrongException, raise_unexpected_error) self.assertFalse(mock_sleep.called) @mock.patch('tenacity.nap.sleep') def test_retry_exit_code(self, sleep_mock): exit_code = 5 exception = utils.processutils.ProcessExecutionError @utils.retry(retry=utils.retry_if_exit_code, retry_param=exit_code) def raise_retriable_exit_code(): raise exception(exit_code=exit_code) self.assertRaises(exception, raise_retriable_exit_code) self.assertEqual(0, sleep_mock.call_count) @mock.patch('tenacity.nap.sleep') def test_retry_exit_code_non_retriable(self, sleep_mock): exit_code = 5 exception = utils.processutils.ProcessExecutionError @utils.retry(retry=utils.retry_if_exit_code, retry_param=exit_code) def raise_non_retriable_exit_code(): raise exception(exit_code=exit_code + 1) self.assertRaises(exception, raise_non_retriable_exit_code) 
        sleep_mock.assert_not_called()


class LogTracingTestCase(base.TestCase):
    """Test out the log tracing."""

    def test_utils_trace_method_default_logger(self):
        mock_log = self.mock_object(utils, 'LOG')

        @utils.trace
        def _trace_test_method_custom_logger(*args, **kwargs):
            return 'OK'

        result = _trace_test_method_custom_logger()

        self.assertEqual('OK', result)
        self.assertEqual(2, mock_log.debug.call_count)

    def test_utils_trace_method_inner_decorator(self):
        mock_logging = self.mock_object(utils, 'logging')
        mock_log = mock.Mock()
        mock_log.isEnabledFor = lambda x: True
        mock_logging.getLogger = mock.Mock(return_value=mock_log)

        def _test_decorator(f):
            def blah(*args, **kwargs):
                return f(*args, **kwargs)
            return blah

        @_test_decorator
        @utils.trace
        def _trace_test_method(*args, **kwargs):
            return 'OK'

        result = _trace_test_method(self)

        self.assertEqual('OK', result)
        self.assertEqual(2, mock_log.debug.call_count)
        # Ensure the correct function name was logged
        for call in mock_log.debug.call_args_list:
            self.assertIn('_trace_test_method', str(call))
            self.assertNotIn('blah', str(call))

    def test_utils_trace_method_outer_decorator(self):
        mock_logging = self.mock_object(utils, 'logging')
        mock_log = mock.Mock()
        mock_log.isEnabledFor = lambda x: True
        mock_logging.getLogger = mock.Mock(return_value=mock_log)

        def _test_decorator(f):
            def blah(*args, **kwargs):
                return f(*args, **kwargs)
            return blah

        @utils.trace
        @_test_decorator
        def _trace_test_method(*args, **kwargs):
            return 'OK'

        result = _trace_test_method(self)

        self.assertEqual('OK', result)
        self.assertEqual(2, mock_log.debug.call_count)
        # Ensure the incorrect function name was logged
        for call in mock_log.debug.call_args_list:
            self.assertNotIn('_trace_test_method', str(call))
            self.assertIn('blah', str(call))

    def test_utils_trace_method_outer_decorator_with_functools(self):
        mock_log = mock.Mock()
        mock_log.isEnabledFor = lambda x: True
        self.mock_object(utils.logging, 'getLogger', mock_log)
        mock_log = self.mock_object(utils, 'LOG')

        def _test_decorator(f):
            @functools.wraps(f)
            def wraps(*args, **kwargs):
                return f(*args, **kwargs)
            return wraps

        @utils.trace
        @_test_decorator
        def _trace_test_method(*args, **kwargs):
            return 'OK'

        result = _trace_test_method()

        self.assertEqual('OK', result)
        self.assertEqual(2, mock_log.debug.call_count)
        # Ensure the correct function name was logged (functools.wraps
        # preserves it through the inner decorator)
        for call in mock_log.debug.call_args_list:
            self.assertIn('_trace_test_method', str(call))
            self.assertNotIn('wraps', str(call))

    def test_utils_trace_method_with_exception(self):
        self.LOG = self.mock_object(utils, 'LOG')

        @utils.trace
        def _trace_test_method(*args, **kwargs):
            raise exception.VolumeDeviceNotFound('test message')

        self.assertRaises(exception.VolumeDeviceNotFound, _trace_test_method)

        exception_log = self.LOG.debug.call_args_list[1]
        self.assertIn('exception', str(exception_log))
        self.assertIn('test message', str(exception_log))

    def test_utils_trace_method_with_time(self):
        mock_logging = self.mock_object(utils, 'logging')
        mock_log = mock.Mock()
        mock_log.isEnabledFor = lambda x: True
        mock_logging.getLogger = mock.Mock(return_value=mock_log)

        mock_time = mock.Mock(side_effect=[3.1, 6])
        self.mock_object(time, 'time', mock_time)

        @utils.trace
        def _trace_test_method(*args, **kwargs):
            return 'OK'

        result = _trace_test_method(self)

        self.assertEqual('OK', result)
        return_log = mock_log.debug.call_args_list[1]
        self.assertIn('2900', str(return_log))

    def test_utils_trace_method_with_password_dict(self):
        mock_logging = self.mock_object(utils, 'logging')
        mock_log = mock.Mock()
        mock_log.isEnabledFor = lambda x: True
mock_logging.getLogger = mock.Mock(return_value=mock_log) @utils.trace def _trace_test_method(*args, **kwargs): return {'something': 'test', 'password': 'Now you see me'} result = _trace_test_method(self) expected_unmasked_dict = {'something': 'test', 'password': 'Now you see me'} self.assertEqual(expected_unmasked_dict, result) self.assertEqual(2, mock_log.debug.call_count) self.assertIn("'password': '***'", str(mock_log.debug.call_args_list[1])) def test_utils_trace_method_with_password_str(self): mock_logging = self.mock_object(utils, 'logging') mock_log = mock.Mock() mock_log.isEnabledFor = lambda x: True mock_logging.getLogger = mock.Mock(return_value=mock_log) @utils.trace def _trace_test_method(*args, **kwargs): return "'adminPass': 'Now you see me'" result = _trace_test_method(self) expected_unmasked_str = "'adminPass': 'Now you see me'" self.assertEqual(expected_unmasked_str, result) self.assertEqual(2, mock_log.debug.call_count) self.assertIn("'adminPass': '***'", str(mock_log.debug.call_args_list[1])) def test_utils_trace_method_with_password_in_formal_params(self): mock_logging = self.mock_object(utils, 'logging') mock_log = mock.Mock() mock_log.isEnabledFor = lambda x: True mock_logging.getLogger = mock.Mock(return_value=mock_log) @utils.trace def _trace_test_method(*args, **kwargs): self.assertEqual('verybadpass', kwargs['connection']['data']['auth_password']) pass connector_properties = { 'data': { 'auth_password': 'verybadpass' } } _trace_test_method(self, connection=connector_properties) self.assertEqual(2, mock_log.debug.call_count) self.assertIn("'auth_password': '***'", str(mock_log.debug.call_args_list[0])) @ddt.ddt class GetDevPathTestCase(base.TestCase): """Test the get_dev_path method.""" @ddt.data({'con_props': {}, 'dev_info': {'path': '/dev/sda'}}, {'con_props': {}, 'dev_info': {'path': b'/dev/sda'}}, {'con_props': None, 'dev_info': {'path': '/dev/sda'}}, {'con_props': None, 'dev_info': {'path': b'/dev/sda'}}, {'con_props': {'device_path': b'/dev/sdb'}, 'dev_info': {'path': '/dev/sda'}}, {'con_props': {'device_path': '/dev/sdb'}, 'dev_info': {'path': b'/dev/sda'}}) @ddt.unpack def test_get_dev_path_device_info(self, con_props, dev_info): self.assertEqual('/dev/sda', utils.get_dev_path(con_props, dev_info)) @ddt.data({'con_props': {'device_path': '/dev/sda'}, 'dev_info': {'path': None}}, {'con_props': {'device_path': b'/dev/sda'}, 'dev_info': {'path': None}}, {'con_props': {'device_path': '/dev/sda'}, 'dev_info': {'path': ''}}, {'con_props': {'device_path': b'/dev/sda'}, 'dev_info': {'path': ''}}, {'con_props': {'device_path': '/dev/sda'}, 'dev_info': {}}, {'con_props': {'device_path': b'/dev/sda'}, 'dev_info': {}}, {'con_props': {'device_path': '/dev/sda'}, 'dev_info': None}, {'con_props': {'device_path': b'/dev/sda'}, 'dev_info': None}) @ddt.unpack def test_get_dev_path_conn_props(self, con_props, dev_info): self.assertEqual('/dev/sda', utils.get_dev_path(con_props, dev_info)) @ddt.data({'con_props': {'device_path': ''}, 'dev_info': {'path': None}}, {'con_props': {'device_path': None}, 'dev_info': {'path': ''}}, {'con_props': {}, 'dev_info': {}}, {'con_props': {}, 'dev_info': None}) @ddt.unpack def test_get_dev_path_no_path(self, con_props, dev_info): self.assertEqual('', utils.get_dev_path(con_props, dev_info)) @ddt.ddt class ConnectionPropertiesDecoratorsTestCase(base.TestCase): def test__symlink_name_from_device_path(self): """Get symlink for non replicated device.""" dev_name = '/dev/nvme0n1' res = utils._symlink_name_from_device_path(dev_name) 
self.assertEqual('/dev/disk/by-id/os-brick+dev+nvme0n1', res) def test__symlink_name_from_device_path_raid(self): """Get symlink for replicated device.""" dev_name = '/dev/md/alias' res = utils._symlink_name_from_device_path(dev_name) self.assertEqual('/dev/disk/by-id/os-brick+dev+md+alias', res) def test__device_path_from_symlink(self): """Get device name for non replicated symlink.""" symlink = '/dev/disk/by-id/os-brick+dev+nvme0n1' res = utils._device_path_from_symlink(symlink) self.assertEqual('/dev/nvme0n1', res) def test__device_path_from_symlink_raid(self): """Get device name for replicated symlink.""" symlink = '/dev/disk/by-id/os-brick+dev+md+alias' res = utils._device_path_from_symlink(symlink) self.assertEqual('/dev/md/alias', res) def test__device_path_from_symlink_file_handle(self): """Get device name for a file handle (eg: RBD).""" handle = io.StringIO() res = utils._device_path_from_symlink(handle) self.assertEqual(handle, res) @ddt.data(({}, {'type': 'block', 'path': '/dev/sda'}), ({'encrypted': False}, {'type': 'block', 'path': '/dev/sda'}), ({'encrypted': False}, {'type': 'block', 'path': b'/dev/sda'}), ({'encrypted': True}, {'type': 'block', 'path': io.StringIO()})) @ddt.unpack @mock.patch('os_brick.utils._symlink_name_from_device_path') @mock.patch('os.path.realpath') @mock.patch('os_brick.privileged.rootwrap.link_root') def test_connect_volume_prepare_result_non_encrypted( self, conn_props, result, mock_link, mock_path, mock_get_symlink): """Test decorator for non encrypted devices or non host devices.""" testing_self = mock.Mock() testing_self.connect_volume.return_value = result func = utils.connect_volume_prepare_result(testing_self.connect_volume) res = func(testing_self, conn_props) self.assertEqual(testing_self.connect_volume.return_value, res) testing_self.connect_volume.assert_called_once_with(testing_self, conn_props) mock_path.assert_not_called() mock_get_symlink.assert_not_called() mock_link.assert_not_called() @ddt.data('/dev/md/alias', b'/dev/md/alias') @mock.patch('os_brick.utils._symlink_name_from_device_path') @mock.patch('os.path.realpath') @mock.patch('os_brick.privileged.rootwrap.link_root') def test_connect_volume_prepare_result_encrypted( self, connector_path, mock_link, mock_path, mock_get_symlink): """Test decorator for encrypted device.""" real_device = '/dev/md-6' expected_symlink = '/dev/disk/by-id/os-brick_dev_md_alias' mock_path.return_value = real_device mock_get_symlink.return_value = expected_symlink testing_self = mock.Mock() testing_self.connect_volume.return_value = {'type': 'block', 'path': connector_path} conn_props = {'encrypted': True} func = utils.connect_volume_prepare_result(testing_self.connect_volume) res = func(testing_self, conn_props) self.assertEqual({'type': 'block', 'path': expected_symlink}, res) testing_self.connect_volume.assert_called_once_with(testing_self, conn_props) expected_connector_path = utils.convert_str(connector_path) mock_get_symlink.assert_called_once_with(expected_connector_path) mock_link.assert_called_once_with(real_device, expected_symlink, force=True) @ddt.data({}, {'encrypted': False}, {'encrypted': True}) @mock.patch('os_brick.utils._symlink_name_from_device_path') @mock.patch('os.path.realpath') @mock.patch('os_brick.privileged.rootwrap.link_root') def test_connect_volume_prepare_result_connect_fail( self, conn_props, mock_link, mock_path, mock_get_symlink): """Test decorator when decorated function fails.""" testing_self = mock.Mock() testing_self.connect_volume.side_effect = ValueError func = 
utils.connect_volume_prepare_result(testing_self.connect_volume) self.assertRaises(ValueError, func, testing_self, conn_props) mock_link.assert_not_called() mock_path.assert_not_called() mock_get_symlink.assert_not_called() @mock.patch('os_brick.utils._symlink_name_from_device_path') @mock.patch('os.path.realpath') @mock.patch('os_brick.privileged.rootwrap.link_root') def test_connect_volume_prepare_result_symlink_fail( self, mock_link, mock_path, mock_get_symlink): """Test decorator for encrypted device failing on the symlink.""" real_device = '/dev/md-6' connector_path = '/dev/md/alias' expected_symlink = '/dev/disk/by-id/os-brick_dev_md_alias' mock_path.return_value = real_device mock_get_symlink.return_value = expected_symlink testing_self = mock.Mock() connect_result = {'type': 'block', 'path': connector_path} mock_link.side_effect = ValueError testing_self.connect_volume.return_value = connect_result conn_props = {'encrypted': True} func = utils.connect_volume_prepare_result(testing_self.connect_volume) self.assertRaises(ValueError, func, testing_self, conn_props) testing_self.connect_volume.assert_called_once_with(testing_self, conn_props) mock_get_symlink.assert_called_once_with(connector_path) mock_link.assert_called_once_with(real_device, expected_symlink, force=True) testing_self.disconnect_volume.assert_called_once_with( connect_result, force=True, ignore_errors=True) @ddt.data(({'device_path': '/dev/md/alias'}, {}), ({'device_path': '/dev/md/alias', 'encrypted': False}, None), ({'device_path': '/dev/md/alias'}, {'path': '/dev/md/alias'}), ({'device_path': '/dev/md/alias', 'encrypted': False}, {'path': '/dev/md/alias'}), ({'device_path': io.StringIO(), 'encrypted': True}, None), ({'device_path': '/dev/disk/by-id/wwn-...', 'encrypted': True}, None)) @ddt.unpack @mock.patch('os_brick.utils._device_path_from_symlink') @mock.patch('os_brick.privileged.rootwrap.unlink_root') def test_connect_volume_undo_prepare_result_non_custom_link( outer_self, conn_props, dev_info, mock_unlink, mock_dev_path): class Test(object): @utils.connect_volume_undo_prepare_result(unlink_after=True) def disconnect_volume(self, connection_properties, device_info, force=False, ignore_errors=False): outer_self.assertEqual(conn_props, connection_properties) outer_self.assertEqual(dev_info, device_info) return 'disconnect_volume' @utils.connect_volume_undo_prepare_result def extend_volume(self, connection_properties): outer_self.assertEqual(conn_props, connection_properties) return 'extend_volume' path = conn_props['device_path'] mock_dev_path.return_value = path t = Test() res = t.disconnect_volume(conn_props, dev_info) outer_self.assertEqual('disconnect_volume', res) res = t.extend_volume(conn_props) outer_self.assertEqual('extend_volume', res) if conn_props.get('encrypted'): outer_self.assertEqual(2, mock_dev_path.call_count) mock_dev_path.assert_has_calls((mock.call(path), mock.call(path))) else: mock_dev_path.assert_not_called() mock_unlink.assert_not_called() @mock.patch('os_brick.utils._device_path_from_symlink') @mock.patch('os_brick.privileged.rootwrap.unlink_root') def test_connect_volume_undo_prepare_result_encrypted_disconnect( outer_self, mock_unlink, mock_dev_path): connector_path = '/dev/md/alias' mock_dev_path.return_value = connector_path symlink_path = '/dev/disk/by-id/os-brick_dev_md_alias' mock_unlink.side_effect = ValueError class Test(object): @utils.connect_volume_undo_prepare_result(unlink_after=True) def disconnect_volume(self, connection_properties, device_info, force=False, 
ignore_errors=False): outer_self.assertEqual(connector_path, connection_properties['device_path']) outer_self.assertEqual(connector_path, device_info['path']) return 'disconnect_volume' conn_props = {'target_portal': '198.72.124.185:3260', 'target_iqn': 'iqn.2010-10.org.openstack:volume-uuid', 'target_lun': 0, 'encrypted': True, 'device_path': symlink_path} dev_info = {'type': 'block', 'path': symlink_path} t = Test() res = t.disconnect_volume(conn_props, dev_info) outer_self.assertEqual('disconnect_volume', res) mock_dev_path.assert_called_once_with(symlink_path) mock_unlink.assert_called_once_with(symlink_path) @mock.patch('os_brick.utils._device_path_from_symlink') @mock.patch('os_brick.privileged.rootwrap.unlink_root') def test_connect_volume_undo_prepare_result_encrypted_extend( outer_self, mock_unlink, mock_dev_path): connector_path = '/dev/md/alias' mock_dev_path.return_value = connector_path symlink_path = '/dev/disk/by-id/os-brick_dev_md_alias' mock_unlink.side_effect = ValueError class Test(object): @utils.connect_volume_undo_prepare_result def extend_volume(self, connection_properties): outer_self.assertEqual(connector_path, connection_properties['device_path']) return 'extend_volume' conn_props = {'target_portal': '198.72.124.185:3260', 'target_iqn': 'iqn.2010-10.org.openstack:volume-uuid', 'target_lun': 0, 'encrypted': True, 'device_path': symlink_path} t = Test() res = t.extend_volume(conn_props) outer_self.assertEqual('extend_volume', res) mock_dev_path.assert_called_once_with(symlink_path) mock_unlink.assert_not_called() @ddt.ddt class AnyTestCase(base.TestCase): @ddt.data('hola', 1, None, {'a': 1}, {1, 2}, False) def test_equal(self, what): self.assertEqual(what, utils.ANY) self.assertEqual(utils.ANY, what) @ddt.data('hola', 1, None, {'a': 1}, {1, 2}, False) def test_different(self, what): self.assertFalse(what != utils.ANY) # noqa self.assertFalse(utils.ANY != what) # noqa self.assertFalse(utils.ANY > what) # noqa self.assertFalse(utils.ANY < what) # noqa self.assertFalse(utils.ANY <= what) # noqa self.assertFalse(utils.ANY >= what) # noqa # File: os_brick-6.11.0/os_brick/tests/windows/__init__.py (empty) # File: os_brick-6.11.0/os_brick/tests/windows/fake_win_conn.py # Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
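# Illustrative sketch (an addition for clarity, not original os-brick code):
# the AnyTestCase above exercises a matcher object, utils.ANY, that compares
# equal to every value while all inequality and ordering comparisons return
# False. A minimal self-contained version of that comparator pattern:
class _AnyLikeSketch(object):
    def __eq__(self, other):
        return True

    def __ne__(self, other):
        return False

    # Orderings never hold, mirroring the test_different assertions.
    __lt__ = __gt__ = __le__ = __ge__ = __ne__


_ANY_SKETCH = _AnyLikeSketch()
# Both orderings work because Python falls back to the reflected method
# when the other operand's comparison returns NotImplemented.
assert _ANY_SKETCH == 'hola' and 'hola' == _ANY_SKETCH
assert not (_ANY_SKETCH != {'a': 1}) and not (_ANY_SKETCH > 1)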
from os_brick.initiator.windows import base as win_conn_base class FakeWindowsConnector(win_conn_base.BaseWindowsConnector): def connect_volume(self, connection_properties): return {} def disconnect_volume(self, connection_properties, device_info, force=False, ignore_errors=False): pass def get_volume_paths(self, connection_properties): return [] def get_search_path(self): return None def get_all_available_volumes(self, connection_properties=None): return [] # File: os_brick-6.11.0/os_brick/tests/windows/test_base.py # Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from os_win import utilsfactory from os_brick.tests import base class WindowsConnectorTestBase(base.TestCase): @mock.patch('sys.platform', 'win32') def setUp(self): super(WindowsConnectorTestBase, self).setUp() # All the Windows connectors use os_win.utilsfactory to fetch Windows # specific utils. During init, those will run methods that will fail # on other platforms. To make testing easier and avoid checking the # platform in the code, we can simply mock this factory method. utilsfactory_patcher = mock.patch.object( utilsfactory, '_get_class') utilsfactory_patcher.start() self.addCleanup(utilsfactory_patcher.stop) # File: os_brick-6.11.0/os_brick/tests/windows/test_base_connector.py # Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
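# Illustrative sketch (standalone example, not os-brick code): the
# WindowsConnectorTestBase above starts a mock.patch in setUp and registers
# its stop with addCleanup, so the patched factory is restored even when a
# test fails partway. The same lifecycle, demonstrated on sys.platform:
import sys
import unittest
from unittest import mock


class PatcherLifecycleSketch(unittest.TestCase):
    def setUp(self):
        super().setUp()
        patcher = mock.patch('sys.platform', 'win32')
        patcher.start()
        # Cleanups run even when the test errors, and also when setUp
        # itself fails after this registration point.
        self.addCleanup(patcher.stop)

    def test_platform_is_patched(self):
        self.assertEqual('win32', sys.platform)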
from unittest import mock import ddt from os_brick import exception from os_brick.initiator.windows import base as base_win_conn from os_brick.tests.windows import fake_win_conn from os_brick.tests.windows import test_base @ddt.ddt class BaseWindowsConnectorTestCase(test_base.WindowsConnectorTestBase): def setUp(self): super(BaseWindowsConnectorTestCase, self).setUp() self._diskutils = mock.Mock() self._connector = fake_win_conn.FakeWindowsConnector() self._connector._diskutils = self._diskutils @ddt.data({}, {'feature_available': True}, {'feature_available': False, 'enforce_multipath': True}) @ddt.unpack @mock.patch.object(base_win_conn.utilsfactory, 'get_hostutils') def test_check_multipath_support(self, mock_get_hostutils, feature_available=True, enforce_multipath=False): mock_hostutils = mock_get_hostutils.return_value mock_hostutils.check_server_feature.return_value = feature_available check_mpio = base_win_conn.BaseWindowsConnector.check_multipath_support if feature_available or not enforce_multipath: multipath_support = check_mpio( enforce_multipath=enforce_multipath) self.assertEqual(feature_available, multipath_support) else: self.assertRaises(exception.BrickException, check_mpio, enforce_multipath=enforce_multipath) mock_hostutils.check_server_feature.assert_called_once_with( mock_hostutils.FEATURE_MPIO) @ddt.data({}, {'mpio_requested': False}, {'mpio_available': True}) @mock.patch.object(base_win_conn.BaseWindowsConnector, 'check_multipath_support') @ddt.unpack def test_get_connector_properties(self, mock_check_mpio, mpio_requested=True, mpio_available=True): mock_check_mpio.return_value = mpio_available enforce_multipath = False props = base_win_conn.BaseWindowsConnector.get_connector_properties( multipath=mpio_requested, enforce_multipath=enforce_multipath) self.assertEqual(mpio_requested and mpio_available, props['multipath']) if mpio_requested: mock_check_mpio.assert_called_once_with(enforce_multipath) def test_get_scsi_wwn(self): mock_get_uid_and_type = self._diskutils.get_disk_uid_and_uid_type mock_get_uid_and_type.return_value = (mock.sentinel.disk_uid, mock.sentinel.uid_type) scsi_wwn = self._connector._get_scsi_wwn(mock.sentinel.dev_num) expected_wwn = '%s%s' % (mock.sentinel.uid_type, mock.sentinel.disk_uid) self.assertEqual(expected_wwn, scsi_wwn) mock_get_uid_and_type.assert_called_once_with(mock.sentinel.dev_num) @ddt.data(None, IOError) @mock.patch('os_brick.initiator.windows.base.open', new_callable=mock.mock_open) def test_check_valid_device(self, exc, mock_open): mock_open.side_effect = exc valid_device = self._connector.check_valid_device( mock.sentinel.dev_path) self.assertEqual(not exc, valid_device) mock_open.assert_any_call(mock.sentinel.dev_path, 'r') mock_read = mock_open.return_value.__enter__.return_value.read if not exc: mock_read.assert_called_once_with(1) def test_check_device_paths(self): # We expect an exception to be raised if the same volume # can be accessed through multiple paths. 
device_paths = [mock.sentinel.dev_path_0, mock.sentinel.dev_path_1] self.assertRaises(exception.BrickException, self._connector._check_device_paths, device_paths) @mock.patch.object(fake_win_conn.FakeWindowsConnector, 'get_volume_paths') def test_extend_volume(self, mock_get_vol_paths): mock_vol_paths = [mock.sentinel.dev_path] mock_get_vol_paths.return_value = mock_vol_paths self._connector.extend_volume(mock.sentinel.conn_props) mock_get_vol_paths.assert_called_once_with(mock.sentinel.conn_props) mock_get_dev_num = self._diskutils.get_device_number_from_device_name mock_get_dev_num.assert_called_once_with(mock.sentinel.dev_path) self._diskutils.refresh_disk.assert_called_once_with( mock_get_dev_num.return_value) @mock.patch.object(fake_win_conn.FakeWindowsConnector, 'get_volume_paths') def test_extend_volume_missing_path(self, mock_get_vol_paths): mock_get_vol_paths.return_value = [] self.assertRaises(exception.NotFound, self._connector.extend_volume, mock.sentinel.conn_props) mock_get_vol_paths.assert_called_once_with(mock.sentinel.conn_props) # File: os_brick-6.11.0/os_brick/tests/windows/test_factory.py # Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from os_brick import initiator from os_brick.initiator import connector from os_brick.initiator.windows import fibre_channel from os_brick.initiator.windows import iscsi from os_brick.initiator.windows import smbfs from os_brick.tests.windows import test_base @ddt.ddt class WindowsConnectorFactoryTestCase(test_base.WindowsConnectorTestBase): @ddt.data({'proto': initiator.ISCSI, 'expected_cls': iscsi.WindowsISCSIConnector}, {'proto': initiator.FIBRE_CHANNEL, 'expected_cls': fibre_channel.WindowsFCConnector}, {'proto': initiator.SMBFS, 'expected_cls': smbfs.WindowsSMBFSConnector}) @ddt.unpack @mock.patch('sys.platform', 'win32') def test_factory(self, proto, expected_cls): obj = connector.InitiatorConnector.factory(proto, None) self.assertIsInstance(obj, expected_cls) # File: os_brick-6.11.0/os_brick/tests/windows/test_fibre_channel.py # Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from os_win import exceptions as os_win_exc from os_brick import exception from os_brick.initiator.windows import fibre_channel as fc from os_brick.tests.windows import test_base @ddt.ddt class WindowsFCConnectorTestCase(test_base.WindowsConnectorTestBase): def setUp(self): super(WindowsFCConnectorTestCase, self).setUp() self._connector = fc.WindowsFCConnector( device_scan_interval=mock.sentinel.rescan_interval) self._diskutils = self._connector._diskutils self._fc_utils = self._connector._fc_utils @ddt.data(True, False) @mock.patch.object(fc.utilsfactory, 'get_fc_utils') def test_get_volume_connector_props(self, valid_fc_hba_ports, mock_get_fc_utils): fake_fc_hba_ports = [{'node_name': mock.sentinel.node_name, 'port_name': mock.sentinel.port_name}, {'node_name': mock.sentinel.second_node_name, 'port_name': mock.sentinel.second_port_name}] self._fc_utils = mock_get_fc_utils.return_value self._fc_utils.get_fc_hba_ports.return_value = ( fake_fc_hba_ports if valid_fc_hba_ports else []) props = self._connector.get_connector_properties() self._fc_utils.refresh_hba_configuration.assert_called_once_with() self._fc_utils.get_fc_hba_ports.assert_called_once_with() if valid_fc_hba_ports: expected_props = { 'wwpns': [mock.sentinel.port_name, mock.sentinel.second_port_name], 'wwnns': [mock.sentinel.node_name, mock.sentinel.second_node_name] } else: expected_props = {} self.assertCountEqual(expected_props, props) @mock.patch.object(fc.WindowsFCConnector, '_get_scsi_wwn') @mock.patch.object(fc.WindowsFCConnector, 'get_volume_paths') def test_connect_volume(self, mock_get_vol_paths, mock_get_scsi_wwn): mock_get_vol_paths.return_value = [mock.sentinel.dev_name] mock_get_dev_num = self._diskutils.get_device_number_from_device_name mock_get_dev_num.return_value = mock.sentinel.dev_num expected_device_info = dict(type='block', path=mock.sentinel.dev_name, number=mock.sentinel.dev_num, scsi_wwn=mock_get_scsi_wwn.return_value) device_info = self._connector.connect_volume(mock.sentinel.conn_props) self.assertEqual(expected_device_info, device_info) mock_get_vol_paths.assert_called_once_with(mock.sentinel.conn_props) mock_get_dev_num.assert_called_once_with(mock.sentinel.dev_name) mock_get_scsi_wwn.assert_called_once_with(mock.sentinel.dev_num) @mock.patch.object(fc.WindowsFCConnector, 'get_volume_paths') def test_connect_volume_not_found(self, mock_get_vol_paths): mock_get_vol_paths.return_value = [] self.assertRaises(exception.NoFibreChannelVolumeDeviceFound, self._connector.connect_volume, mock.sentinel.conn_props) @ddt.data({'volume_mappings': [], 'expected_paths': []}, {'volume_mappings': [dict(device_name='', fcp_lun=mock.sentinel.fcp_lun)] * 3, 'scsi_id_side_eff': os_win_exc.OSWinException, 'expected_paths': []}, {'volume_mappings': [dict(device_name='', fcp_lun=mock.sentinel.fcp_lun), dict(device_name=mock.sentinel.disk_path)], 'expected_paths': [mock.sentinel.disk_path]}, {'volume_mappings': [dict(device_name='', fcp_lun=mock.sentinel.fcp_lun)], 'scsi_id_side_eff': [[mock.sentinel.disk_path]], 'expected_paths': [mock.sentinel.disk_path]}, {'volume_mappings': [dict(device_name=mock.sentinel.disk_path)], 'use_multipath': True, 'is_mpio_disk': True, 'expected_paths': [mock.sentinel.disk_path]}, {'volume_mappings': [dict(device_name=mock.sentinel.disk_path)], 'use_multipath': True, 'is_mpio_disk': False, 'expected_paths': []}) @ddt.unpack @mock.patch('time.sleep') 
@mock.patch.object(fc.WindowsFCConnector, '_get_fc_volume_mappings') @mock.patch.object(fc.WindowsFCConnector, '_get_disk_paths_by_scsi_id') def test_get_volume_paths(self, mock_get_disk_paths_by_scsi_id, mock_get_fc_mappings, mock_sleep, volume_mappings, expected_paths, scsi_id_side_eff=None, use_multipath=False, is_mpio_disk=False): mock_get_dev_num = self._diskutils.get_device_number_from_device_name mock_get_fc_mappings.return_value = volume_mappings mock_get_disk_paths_by_scsi_id.side_effect = scsi_id_side_eff self._diskutils.is_mpio_disk.return_value = is_mpio_disk self._connector.use_multipath = use_multipath vol_paths = self._connector.get_volume_paths(mock.sentinel.conn_props) self.assertEqual(expected_paths, vol_paths) # In this test case, either the volume is found after the first # attempt, or it's not found at all, in which case we'd expect # the number of retries to be the requested maximum number of rescans. expected_try_count = (1 if expected_paths else self._connector.device_scan_attempts) self._diskutils.rescan_disks.assert_has_calls( [mock.call()] * expected_try_count) mock_get_fc_mappings.assert_has_calls( [mock.call(mock.sentinel.conn_props)] * expected_try_count) mock_sleep.assert_has_calls( [mock.call(mock.sentinel.rescan_interval)] * (expected_try_count - 1)) dev_names = [mapping['device_name'] for mapping in volume_mappings if mapping['device_name']] if volume_mappings and not dev_names: mock_get_disk_paths_by_scsi_id.assert_any_call( mock.sentinel.conn_props, volume_mappings[0]['fcp_lun']) if expected_paths and use_multipath: mock_get_dev_num.assert_called_once_with(expected_paths[0]) self._diskutils.is_mpio_disk.assert_any_call( mock_get_dev_num.return_value) @mock.patch.object(fc.WindowsFCConnector, '_get_fc_hba_mappings') def test_get_fc_volume_mappings(self, mock_get_fc_hba_mappings): fake_target_wwpn = 'FAKE_TARGET_WWPN' fake_conn_props = dict(target_lun=mock.sentinel.target_lun, target_wwn=[fake_target_wwpn]) mock_hba_mappings = {mock.sentinel.node_name: mock.sentinel.hba_ports} mock_get_fc_hba_mappings.return_value = mock_hba_mappings all_target_mappings = [{'device_name': mock.sentinel.dev_name, 'port_name': fake_target_wwpn, 'lun': mock.sentinel.target_lun}, {'device_name': mock.sentinel.dev_name_1, 'port_name': mock.sentinel.target_port_name_1, 'lun': mock.sentinel.target_lun}, {'device_name': mock.sentinel.dev_name, 'port_name': mock.sentinel.target_port_name, 'lun': mock.sentinel.target_lun_1}] expected_mappings = [all_target_mappings[0]] self._fc_utils.get_fc_target_mappings.return_value = ( all_target_mappings) volume_mappings = self._connector._get_fc_volume_mappings( fake_conn_props) self.assertEqual(expected_mappings, volume_mappings) def test_get_fc_hba_mappings(self): fake_fc_hba_ports = [{'node_name': mock.sentinel.node_name, 'port_name': mock.sentinel.port_name}] self._fc_utils.get_fc_hba_ports.return_value = fake_fc_hba_ports resulted_mappings = self._connector._get_fc_hba_mappings() expected_mappings = { mock.sentinel.node_name: [mock.sentinel.port_name]} self.assertEqual(expected_mappings, resulted_mappings) @mock.patch.object(fc.WindowsFCConnector, '_get_dev_nums_by_scsi_id') def test_get_disk_paths_by_scsi_id(self, mock_get_dev_nums): remote_wwpns = [mock.sentinel.remote_wwpn_0, mock.sentinel.remote_wwpn_1] fake_init_target_map = {mock.sentinel.local_wwpn: remote_wwpns} conn_props = dict(initiator_target_map=fake_init_target_map) mock_get_dev_nums.side_effect = [os_win_exc.FCException, [mock.sentinel.dev_num]] mock_get_dev_name =
self._diskutils.get_device_name_by_device_number mock_get_dev_name.return_value = mock.sentinel.dev_name disk_paths = self._connector._get_disk_paths_by_scsi_id( conn_props, mock.sentinel.fcp_lun) self.assertEqual([mock.sentinel.dev_name], disk_paths) mock_get_dev_nums.assert_has_calls([ mock.call(mock.sentinel.local_wwpn, remote_wwpn, mock.sentinel.fcp_lun) for remote_wwpn in remote_wwpns]) mock_get_dev_name.assert_called_once_with(mock.sentinel.dev_num) @mock.patch.object(fc.WindowsFCConnector, '_get_fc_hba_wwn_for_port') def test_get_dev_nums_by_scsi_id(self, mock_get_fc_hba_wwn): fake_identifier = dict(id=mock.sentinel.id, type=mock.sentinel.type) mock_get_fc_hba_wwn.return_value = mock.sentinel.local_wwnn self._fc_utils.get_scsi_device_identifiers.return_value = [ fake_identifier] self._diskutils.get_disk_numbers_by_unique_id.return_value = ( mock.sentinel.dev_nums) dev_nums = self._connector._get_dev_nums_by_scsi_id( mock.sentinel.local_wwpn, mock.sentinel.remote_wwpn, mock.sentinel.fcp_lun) self.assertEqual(mock.sentinel.dev_nums, dev_nums) mock_get_fc_hba_wwn.assert_called_once_with(mock.sentinel.local_wwpn) self._fc_utils.get_scsi_device_identifiers.assert_called_once_with( mock.sentinel.local_wwnn, mock.sentinel.local_wwpn, mock.sentinel.remote_wwpn, mock.sentinel.fcp_lun) self._diskutils.get_disk_numbers_by_unique_id.assert_called_once_with( unique_id=mock.sentinel.id, unique_id_format=mock.sentinel.type) # File: os_brick-6.11.0/os_brick/tests/windows/test_iscsi.py # Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
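# Illustrative sketch (standalone example, not os-brick code): the connector
# tests in this archive combine ddt-driven parametrization with
# mock.sentinel, whose attributes are unique singleton placeholders, letting
# assertions check that the exact object flowed through unchanged:
import unittest
from unittest import mock

import ddt


@ddt.ddt
class DdtSentinelSketch(unittest.TestCase):
    @ddt.data({'value': 2, 'doubled': 4},
              {'value': 5, 'doubled': 10})
    @ddt.unpack
    def test_doubling(self, value, doubled):
        self.assertEqual(doubled, value * 2)

    def test_sentinel_passthrough(self):
        fetch = mock.Mock(return_value=mock.sentinel.device)
        self.assertIs(mock.sentinel.device, fetch('ignored'))
        fetch.assert_called_once_with('ignored')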
from unittest import mock import ddt from os_win import exceptions as os_win_exc from os_brick import exception from os_brick.initiator.windows import iscsi from os_brick.tests.windows import test_base @ddt.ddt class WindowsISCSIConnectorTestCase(test_base.WindowsConnectorTestBase): @mock.patch.object(iscsi.WindowsISCSIConnector, 'validate_initiators') def setUp(self, mock_validate_connectors): super(WindowsISCSIConnectorTestCase, self).setUp() self._diskutils = mock.Mock() self._iscsi_utils = mock.Mock() self._connector = iscsi.WindowsISCSIConnector( device_scan_interval=mock.sentinel.rescan_interval) self._connector._diskutils = self._diskutils self._connector._iscsi_utils = self._iscsi_utils @ddt.data({'requested_initiators': [mock.sentinel.initiator_0], 'available_initiators': [mock.sentinel.initiator_0, mock.sentinel.initiator_1]}, {'requested_initiators': [mock.sentinel.initiator_0], 'available_initiators': [mock.sentinel.initiator_1]}, {'requested_initiators': [], 'available_initiators': [mock.sentinel.software_initiator]}) @ddt.unpack def test_validate_initiators(self, requested_initiators, available_initiators): self._iscsi_utils.get_iscsi_initiators.return_value = ( available_initiators) self._connector.initiator_list = requested_initiators expected_valid_initiator = not ( set(requested_initiators).difference(set(available_initiators))) valid_initiator = self._connector.validate_initiators() self.assertEqual(expected_valid_initiator, valid_initiator) def test_get_initiator(self): initiator = self._connector.get_initiator() self.assertEqual(self._iscsi_utils.get_iscsi_initiator.return_value, initiator) @mock.patch.object(iscsi, 'utilsfactory') def test_get_connector_properties(self, mock_utilsfactory): mock_iscsi_utils = ( mock_utilsfactory.get_iscsi_initiator_utils.return_value) props = self._connector.get_connector_properties() expected_props = dict( initiator=mock_iscsi_utils.get_iscsi_initiator.return_value) self.assertEqual(expected_props, props) @mock.patch.object(iscsi.WindowsISCSIConnector, '_get_all_targets') def test_get_all_paths(self, mock_get_all_targets): initiators = [mock.sentinel.initiator_0, mock.sentinel.initiator_1] all_targets = [(mock.sentinel.portal_0, mock.sentinel.target_0, mock.sentinel.lun_0), (mock.sentinel.portal_1, mock.sentinel.target_1, mock.sentinel.lun_1)] self._connector.initiator_list = initiators mock_get_all_targets.return_value = all_targets expected_paths = [ (initiator_name, target_portal, target_iqn, target_lun) for target_portal, target_iqn, target_lun in all_targets for initiator_name in initiators] all_paths = self._connector._get_all_paths(mock.sentinel.conn_props) self.assertEqual(expected_paths, all_paths) mock_get_all_targets.assert_called_once_with(mock.sentinel.conn_props) @ddt.data(True, False) @mock.patch.object(iscsi.WindowsISCSIConnector, '_get_scsi_wwn') @mock.patch.object(iscsi.WindowsISCSIConnector, '_get_all_paths') def test_connect_volume(self, use_multipath, mock_get_all_paths, mock_get_scsi_wwn): fake_paths = [(mock.sentinel.initiator_name, mock.sentinel.target_portal, mock.sentinel.target_iqn, mock.sentinel.target_lun)] * 3 fake_conn_props = dict(auth_username=mock.sentinel.auth_username, auth_password=mock.sentinel.auth_password) mock_get_all_paths.return_value = fake_paths self._iscsi_utils.login_storage_target.side_effect = [ os_win_exc.OSWinException, None, None] self._iscsi_utils.get_device_number_and_path.return_value = ( mock.sentinel.device_number, mock.sentinel.device_path) self._connector.use_multipath = 
use_multipath device_info = self._connector.connect_volume(fake_conn_props) expected_device_info = dict(type='block', path=mock.sentinel.device_path, number=mock.sentinel.device_number, scsi_wwn=mock_get_scsi_wwn.return_value) self.assertEqual(expected_device_info, device_info) mock_get_all_paths.assert_called_once_with(fake_conn_props) expected_login_attempts = 3 if use_multipath else 2 self._iscsi_utils.login_storage_target.assert_has_calls( [mock.call(target_lun=mock.sentinel.target_lun, target_iqn=mock.sentinel.target_iqn, target_portal=mock.sentinel.target_portal, auth_username=mock.sentinel.auth_username, auth_password=mock.sentinel.auth_password, mpio_enabled=use_multipath, initiator_name=mock.sentinel.initiator_name, ensure_lun_available=False)] * expected_login_attempts) self._iscsi_utils.get_device_number_and_path.assert_called_once_with( mock.sentinel.target_iqn, mock.sentinel.target_lun, retry_attempts=self._connector.device_scan_attempts, retry_interval=self._connector.device_scan_interval, rescan_disks=True, ensure_mpio_claimed=use_multipath) mock_get_scsi_wwn.assert_called_once_with(mock.sentinel.device_number) @mock.patch.object(iscsi.WindowsISCSIConnector, '_get_all_paths') def test_connect_volume_exc(self, mock_get_all_paths): fake_paths = [(mock.sentinel.initiator_name, mock.sentinel.target_portal, mock.sentinel.target_iqn, mock.sentinel.target_lun)] * 3 mock_get_all_paths.return_value = fake_paths self._iscsi_utils.login_storage_target.side_effect = ( os_win_exc.OSWinException) self._connector.use_multipath = True self.assertRaises(exception.BrickException, self._connector.connect_volume, connection_properties={}) @mock.patch.object(iscsi.WindowsISCSIConnector, '_get_all_targets') def test_disconnect_volume(self, mock_get_all_targets): targets = [ (mock.sentinel.portal_0, mock.sentinel.tg_0, mock.sentinel.lun_0), (mock.sentinel.portal_1, mock.sentinel.tg_1, mock.sentinel.lun_1)] mock_get_all_targets.return_value = targets self._iscsi_utils.get_target_luns.return_value = [mock.sentinel.lun_0] self._connector.disconnect_volume(mock.sentinel.conn_props, mock.sentinel.dev_info) self._diskutils.rescan_disks.assert_called_once_with() mock_get_all_targets.assert_called_once_with(mock.sentinel.conn_props) self._iscsi_utils.logout_storage_target.assert_called_once_with( mock.sentinel.tg_0) self._iscsi_utils.get_target_luns.assert_has_calls( [mock.call(mock.sentinel.tg_0), mock.call(mock.sentinel.tg_1)]) @mock.patch.object(iscsi.WindowsISCSIConnector, '_get_all_targets') @mock.patch.object(iscsi.WindowsISCSIConnector, '_check_device_paths') def test_get_volume_paths(self, mock_check_dev_paths, mock_get_all_targets): targets = [ (mock.sentinel.portal_0, mock.sentinel.tg_0, mock.sentinel.lun_0), (mock.sentinel.portal_1, mock.sentinel.tg_1, mock.sentinel.lun_1)] mock_get_all_targets.return_value = targets self._iscsi_utils.get_device_number_and_path.return_value = [ mock.sentinel.dev_num, mock.sentinel.dev_path] volume_paths = self._connector.get_volume_paths( mock.sentinel.conn_props) expected_paths = [mock.sentinel.dev_path] self.assertEqual(expected_paths, volume_paths) mock_check_dev_paths.assert_called_once_with(set(expected_paths)) # File: os_brick-6.11.0/os_brick/tests/windows/test_rbd.py # Copyright 2020 Cloudbase Solutions Srl # All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_concurrency import processutils from os_brick import exception from os_brick.initiator.windows import rbd from os_brick.tests.initiator.connectors import test_base_rbd from os_brick.tests.windows import test_base @ddt.ddt class WindowsRBDConnectorTestCase(test_base_rbd.RBDConnectorTestMixin, test_base.WindowsConnectorTestBase): def setUp(self): super(WindowsRBDConnectorTestCase, self).setUp() self._diskutils = mock.Mock() self._execute = mock.Mock(return_value=['fake_stdout', 'fake_stderr']) self._conn = rbd.WindowsRBDConnector(execute=self._execute) self._conn._diskutils = self._diskutils self.dev_name = '\\\\.\\PhysicalDrive5' @ddt.data(True, False) def test_check_rbd(self, rbd_available): self._execute.side_effect = ( None if rbd_available else processutils.ProcessExecutionError) self.assertEqual(rbd_available, self._conn._check_rbd()) if rbd_available: self._conn._ensure_rbd_available() else: self.assertRaises(exception.BrickException, self._conn._ensure_rbd_available) expected_cmd = ['where.exe', 'rbd'] self._execute.assert_any_call(*expected_cmd) @mock.patch.object(rbd.WindowsRBDConnector, 'get_device_name') def test_get_volume_paths(self, mock_get_dev_name): vol_paths = self._conn.get_volume_paths(mock.sentinel.conn_props) self.assertEqual([mock_get_dev_name.return_value], vol_paths) mock_get_dev_name.assert_called_once_with(mock.sentinel.conn_props) @ddt.data(True, False) @mock.patch.object(rbd.WindowsRBDConnector, 'get_device_name') @mock.patch('oslo_utils.eventletutils.EventletEvent.wait') def test_wait_for_volume(self, device_found, mock_wait, mock_get_dev_name): mock_open = mock.mock_open() if device_found: mock_get_dev_name.return_value = mock.sentinel.dev_name else: # First call fails to locate the device, the following ones can't # open it. 
mock_get_dev_name.side_effect = ( [None] + [mock.sentinel.dev_name] * self._conn.device_scan_attempts) mock_open.side_effect = FileNotFoundError with mock.patch.object(rbd, 'open', mock_open, create=True): if device_found: dev_name = self._conn._wait_for_volume( self.connection_properties) self.assertEqual(mock.sentinel.dev_name, dev_name) else: self.assertRaises(exception.VolumeDeviceNotFound, self._conn._wait_for_volume, self.connection_properties) mock_open.assert_any_call(mock.sentinel.dev_name, 'rb') mock_get_dev_name.assert_any_call(self.connection_properties, expect=False) @mock.patch.object(rbd.WindowsRBDConnector, '_wait_for_volume') @mock.patch.object(rbd.WindowsRBDConnector, 'get_device_name') def test_connect_volume(self, mock_get_dev_name, mock_wait_vol): mock_get_dev_name.return_value = None mock_wait_vol.return_value = self.dev_name ret_val = self._conn.connect_volume(self.connection_properties) exp_ret_val = { 'path': self.dev_name, 'type': 'block' } self.assertEqual(exp_ret_val, ret_val) exp_exec_args = ['rbd', 'device', 'map', self.image_name] exp_exec_args += self._conn._get_rbd_args(self.connection_properties) self._execute.assert_any_call(*exp_exec_args) mock_wait_vol.assert_called_once_with(self.connection_properties) mock_get_dev_num = self._diskutils.get_device_number_from_device_name mock_get_dev_num.assert_called_once_with(self.dev_name) self._diskutils.set_disk_offline.assert_called_once_with( mock_get_dev_num.return_value) @ddt.data(True, False) @mock.patch.object(rbd.WindowsRBDConnector, 'get_device_name') def test_disconnect_volume(self, force, mock_get_dev_name): mock_get_dev_name.return_value = self.dev_name self._conn.disconnect_volume(self.connection_properties, force=force) exp_exec_args = ['rbd', 'device', 'unmap', self.image_name] exp_exec_args += self._conn._get_rbd_args(self.connection_properties) if force: exp_exec_args += ["-o", "hard-disconnect"] self._execute.assert_any_call(*exp_exec_args) # File: os_brick-6.11.0/os_brick/tests/windows/test_smbfs.py # Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
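# Illustrative sketch (standalone example, not os-brick code): the RBD tests
# above replace ``open`` inside the module under test via mock.mock_open,
# passing create=True because the module does not define an ``open``
# attribute of its own. The same idea applied to this very module:
import sys
from unittest import mock


def _probe_sketch(path):
    with open(path, 'rb') as f:
        return f.read()


def _demo_mock_open():
    m = mock.mock_open(read_data=b'ok')
    this_module = sys.modules[__name__]
    # create=True permits patching an attribute that does not pre-exist;
    # inside the ``with`` block, _probe_sketch resolves ``open`` through the
    # module namespace to the mock before reaching the builtin.
    with mock.patch.object(this_module, 'open', m, create=True):
        assert _probe_sketch('/dev/fake') == b'ok'
    m.assert_called_once_with('/dev/fake', 'rb')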
import os from unittest import mock import ddt from os_brick.initiator.windows import smbfs from os_brick.remotefs import windows_remotefs from os_brick.tests.windows import test_base @ddt.ddt class WindowsSMBFSConnectorTestCase(test_base.WindowsConnectorTestBase): def setUp(self): super(WindowsSMBFSConnectorTestCase, self).setUp() self._load_connector() @mock.patch.object(windows_remotefs, 'WindowsRemoteFsClient') def _load_connector(self, mock_remotefs_cls, *args, **kwargs): self._connector = smbfs.WindowsSMBFSConnector(*args, **kwargs) self._remotefs = mock_remotefs_cls.return_value self._vhdutils = self._connector._vhdutils self._diskutils = self._connector._diskutils @mock.patch.object(smbfs.WindowsSMBFSConnector, '_get_disk_path') @mock.patch.object(smbfs.WindowsSMBFSConnector, 'ensure_share_mounted') def test_connect_volume(self, mock_ensure_mounted, mock_get_disk_path): device_info = self._connector.connect_volume(mock.sentinel.conn_props) expected_info = dict(type='file', path=mock_get_disk_path.return_value) self.assertEqual(expected_info, device_info) mock_ensure_mounted.assert_called_once_with(mock.sentinel.conn_props) mock_get_disk_path.assert_called_once_with(mock.sentinel.conn_props) @ddt.data(True, False) @mock.patch.object(smbfs.WindowsSMBFSConnector, '_get_disk_path') @mock.patch.object(smbfs.WindowsSMBFSConnector, 'ensure_share_mounted') def test_connect_and_mount_volume(self, read_only, mock_ensure_mounted, mock_get_disk_path): self._load_connector(expect_raw_disk=True) fake_conn_props = dict(access_mode='ro' if read_only else 'rw') self._vhdutils.get_virtual_disk_physical_path.return_value = ( mock.sentinel.raw_disk_path) mock_get_disk_path.return_value = mock.sentinel.image_path device_info = self._connector.connect_volume(fake_conn_props) expected_info = dict(type='file', path=mock.sentinel.raw_disk_path) self.assertEqual(expected_info, device_info) self._vhdutils.attach_virtual_disk.assert_called_once_with( mock.sentinel.image_path, read_only=read_only) self._vhdutils.get_virtual_disk_physical_path.assert_called_once_with( mock.sentinel.image_path) get_dev_num = self._diskutils.get_device_number_from_device_name get_dev_num.assert_called_once_with(mock.sentinel.raw_disk_path) self._diskutils.set_disk_offline.assert_called_once_with( get_dev_num.return_value) @mock.patch.object(smbfs.WindowsSMBFSConnector, '_get_disk_path') @mock.patch.object(smbfs.WindowsSMBFSConnector, '_get_export_path') def test_disconnect_volume(self, mock_get_export_path, mock_get_disk_path): self._connector.disconnect_volume(mock.sentinel.conn_props, mock.sentinel.dev_info) mock_get_disk_path.assert_called_once_with( mock.sentinel.conn_props) self._vhdutils.detach_virtual_disk.assert_called_once_with( mock_get_disk_path.return_value) self._remotefs.unmount.assert_called_once_with( mock_get_export_path.return_value) mock_get_export_path.assert_called_once_with(mock.sentinel.conn_props) def test_get_export_path(self): fake_export = '//ip/share' fake_conn_props = dict(export=fake_export) expected_export = fake_export.replace('/', '\\') export_path = self._connector._get_export_path(fake_conn_props) self.assertEqual(expected_export, export_path) @ddt.data({}, {'mount_base': mock.sentinel.mount_base}, {'is_local_share': True}, {'is_local_share': True, 'local_path_for_loopbk': True}) @ddt.unpack def test_get_disk_path(self, mount_base=None, local_path_for_loopbk=False, is_local_share=False): fake_mount_point = r'C:\\fake_mount_point' fake_share_name = 'fake_share' fake_local_share_path = 'C:\\%s' % 
fake_share_name fake_export_path = '\\\\host\\%s' % fake_share_name fake_disk_name = 'fake_disk.vhdx' fake_conn_props = dict(name=fake_disk_name, export=fake_export_path) self._remotefs.get_mount_base.return_value = mount_base self._remotefs.get_mount_point.return_value = fake_mount_point self._remotefs.get_local_share_path.return_value = ( fake_local_share_path) self._remotefs.get_share_name.return_value = fake_share_name self._connector._local_path_for_loopback = local_path_for_loopbk self._connector._smbutils.is_local_share.return_value = is_local_share expecting_local = local_path_for_loopbk and is_local_share if mount_base: expected_export_path = fake_mount_point elif expecting_local: # In this case, we expect the local share export path to be # used directly. expected_export_path = fake_local_share_path else: expected_export_path = fake_export_path expected_disk_path = os.path.join(expected_export_path, fake_disk_name) disk_path = self._connector._get_disk_path(fake_conn_props) self.assertEqual(expected_disk_path, disk_path) if mount_base: self._remotefs.get_mount_point.assert_called_once_with( fake_export_path) elif expecting_local: self._connector._smbutils.is_local_share.assert_called_once_with( fake_export_path) self._remotefs.get_local_share_path.assert_called_once_with( fake_export_path) def test_get_search_path(self): search_path = self._connector.get_search_path() self.assertEqual(search_path, self._remotefs.get_mount_base.return_value) @mock.patch.object(smbfs.WindowsSMBFSConnector, '_get_disk_path') def test_volume_paths(self, mock_get_disk_path): expected_paths = [mock_get_disk_path.return_value] volume_paths = self._connector.get_volume_paths( mock.sentinel.conn_props) self.assertEqual(expected_paths, volume_paths) mock_get_disk_path.assert_called_once_with( mock.sentinel.conn_props) @mock.patch.object(smbfs.WindowsSMBFSConnector, '_get_export_path') def test_ensure_share_mounted(self, mock_get_export_path): fake_conn_props = dict(options=mock.sentinel.mount_opts) self._connector.ensure_share_mounted(fake_conn_props) self._remotefs.mount.assert_called_once_with( mock_get_export_path.return_value, mock.sentinel.mount_opts) # File: os_brick-6.11.0/os_brick/utils.py # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
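# Illustrative sketch (standalone and simplified, not os-brick code): the
# SMBFS test_get_disk_path cases above encode a three-way choice for the
# directory holding the disk image: an explicit mount base wins, then the
# local share path when loopback-local access is requested and the share is
# actually local, and otherwise the raw UNC export path. Condensed:
import os


def pick_disk_path_sketch(disk_name, export_path, mount_point,
                          local_share_path, mount_base=None,
                          local_path_for_loopback=False,
                          is_local_share=False):
    if mount_base:
        base = mount_point
    elif local_path_for_loopback and is_local_share:
        base = local_share_path
    else:
        base = export_path
    return os.path.join(base, disk_name)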
# """Utilities and helper functions.""" from __future__ import annotations import binascii import functools import inspect import logging as py_logging import os import time from typing import Any, Callable, Optional, Type, Union import uuid as uuid_lib from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import strutils from os_brick import executor from os_brick.i18n import _ from os_brick.privileged import nvmeof as priv_nvme from os_brick.privileged import rootwrap as priv_rootwrap CUSTOM_LINK_PREFIX = '/dev/disk/by-id/os-brick' _time_sleep = time.sleep def _sleep(secs: float) -> None: """Helper function to make it easier to work around tenacity's sleep calls. Apparently we are all idiots for wanting to test our code here [0], so this is a hack to be able to get retries to not actually sleep. [0] https://github.com/jd/tenacity/issues/25 """ _time_sleep(secs) time.sleep = _sleep import tenacity # noqa LOG = logging.getLogger(__name__) class retry_if_exit_code(tenacity.retry_if_exception): """Retry on ProcessExecutionError specific exit codes.""" def __init__(self, codes: Union[int, tuple[int, ...]]): self.codes = (codes,) if isinstance(codes, int) else codes super(retry_if_exit_code, self).__init__(self._check_exit_code) def _check_exit_code(self, exc: Exception) -> bool: return (bool(exc) and isinstance(exc, processutils.ProcessExecutionError) and exc.exit_code in self.codes) def retry(retry_param: Union[None, Type[Exception], tuple[Type[Exception], ...], int, tuple[int, ...]], interval: float = 1, retries: int = 3, backoff_rate: float = 2, retry: Callable = tenacity.retry_if_exception_type) -> Callable: if retries < 1: raise ValueError(_('Retries must be greater than or ' 'equal to 1 (received: %s). ') % retries) def _decorator(f): @functools.wraps(f) def _wrapper(*args, **kwargs): r = tenacity.Retrying( before_sleep=tenacity.before_sleep_log(LOG, logging.DEBUG), after=tenacity.after_log(LOG, logging.DEBUG), stop=tenacity.stop_after_attempt(retries), reraise=True, retry=retry(retry_param), wait=tenacity.wait_exponential( multiplier=interval, min=0, exp_base=backoff_rate)) return r(f, *args, **kwargs) return _wrapper return _decorator def platform_matches(current_platform: str, connector_platform: str) -> bool: curr_p = current_platform.upper() conn_p = connector_platform.upper() if conn_p == 'ALL': return True # Add tests against families of platforms if curr_p == conn_p: return True return False def os_matches(current_os: str, connector_os: str) -> bool: curr_os = current_os.upper() conn_os = connector_os.upper() if conn_os == 'ALL': return True # add tests against OSs if (conn_os == curr_os or conn_os in curr_os): return True return False def merge_dict(dict1: dict, dict2: dict) -> dict: """Try to safely merge 2 dictionaries.""" if type(dict1) is not dict: raise Exception("dict1 is not a dictionary") if type(dict2) is not dict: raise Exception("dict2 is not a dictionary") dict3 = dict1.copy() dict3.update(dict2) return dict3 def trace(f: Callable) -> Callable: """Trace calls to the decorated function. This decorator should always be defined as the outermost decorator so it is defined last. This is important so it does not interfere with other decorators. Using this decorator on a function will cause its execution to be logged at `DEBUG` level with arguments, return values, and exceptions.
:returns: a function decorator """ func_name = f.__name__ @functools.wraps(f) def trace_logging_wrapper(*args, **kwargs): if len(args) > 0: maybe_self = args[0] else: maybe_self = kwargs.get('self', None) if maybe_self and hasattr(maybe_self, '__module__'): logger = logging.getLogger(maybe_self.__module__) else: logger = LOG # NOTE(ameade): Don't bother going any further if DEBUG log level # is not enabled for the logger. if not logger.isEnabledFor(py_logging.DEBUG): return f(*args, **kwargs) all_args = inspect.getcallargs(f, *args, **kwargs) logger.debug('==> %(func)s: call %(all_args)r', {'func': func_name, # NOTE(mriedem): We have to stringify the dict first # and don't use mask_dict_password because it results in # an infinite recursion failure. 'all_args': strutils.mask_password( str(all_args))}) start_time = time.time() * 1000 try: result = f(*args, **kwargs) except Exception as exc: total_time = int(round(time.time() * 1000)) - start_time logger.debug('<== %(func)s: exception (%(time)dms) %(exc)r', {'func': func_name, 'time': total_time, 'exc': exc}) raise total_time = int(round(time.time() * 1000)) - start_time if isinstance(result, dict): mask_result = strutils.mask_dict_password(result) elif isinstance(result, str): mask_result = strutils.mask_password(result) else: mask_result = result logger.debug('<== %(func)s: return (%(time)dms) %(result)r', {'func': func_name, 'time': total_time, 'result': mask_result}) return result return trace_logging_wrapper def convert_str(text: Union[bytes, str]) -> str: """Convert to native string. Convert bytes and Unicode strings to native strings: * convert to Unicode on Python 3: decode bytes from UTF-8 """ if isinstance(text, bytes): return text.decode('utf-8') else: return text def get_host_nqn(system_uuid: Optional[str] = None) -> Optional[str]: """Ensure that hostnqn exists, creating if necessary. This method tries to return contents from /etc/nvme/hostnqn and if not possible then creates the file calling create_hostnqn and passing provided system_uuid and returns the contents of the newly created file. Method create_hostnqn gives priority to the provided system_uuid parameter for the contents of the file over other alternatives it has. """ try: with open('/etc/nvme/hostnqn', 'r') as f: host_nqn = f.read().strip() except IOError: host_nqn = priv_nvme.create_hostnqn(system_uuid) except Exception: host_nqn = None return host_nqn def get_nvme_host_id(uuid: Optional[str]) -> Optional[str]: """Get the nvme host id If the hostid file doesn't exist create it either with the passed uuid or a random one. """ try: with open('/etc/nvme/hostid', 'r') as f: host_id = f.read().strip() except IOError: uuid = uuid or str(uuid_lib.uuid4()) host_id = priv_nvme.create_hostid(uuid) except Exception: host_id = None return host_id def _symlink_name_from_device_path(device_path): """Generate symlink absolute path for encrypted devices. The symlink's basename will contain the original device name so we can reconstruct it afterwards on disconnect. Being able to restore the original device name may be important for some connectors, because the system may have multiple devices for the same connection information (for example if a controller came back to life after having network issues and an auto scan presented the device) and if we reuse an existing symlink created by udev we wouldn't know which one was actually used. 
def _symlink_name_from_device_path(device_path):
    """Generate an absolute symlink path for encrypted devices.

    The symlink's basename will contain the original device name so we can
    reconstruct it afterwards on disconnect.

    Being able to restore the original device name may be important for some
    connectors, because the system may have multiple devices for the same
    connection information (for example if a controller came back to life
    after having network issues and an auto scan presented the device) and
    if we reuse an existing symlink created by udev we wouldn't know which
    one was actually used.

    The symlink will be created under the /dev/disk/by-id directory and will
    prefix the name with os-brick- and then continue with the full device
    path that was passed (replacing '/' with '+').
    """
    # Convert / into +, which is unlikely to be used by devices or symlinks
    # (cryptsetup is not happy if we use · in the symlink)
    encoded_device = device_path.replace('/', '+')
    return CUSTOM_LINK_PREFIX + encoded_device


def _device_path_from_symlink(symlink):
    """Get the original encrypted device path from the device symlink.

    This is the reverse operation of the one performed by the
    _symlink_name_from_device_path method.
    """
    if (symlink and isinstance(symlink, str)
            and symlink.startswith(CUSTOM_LINK_PREFIX)):
        ending = symlink[len(CUSTOM_LINK_PREFIX):]
        return ending.replace('+', '/')
    return symlink
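
# Illustrative round-trip of the two symlink helpers above with an example
# device path:
#     _symlink_name_from_device_path('/dev/nvme0n1')
#         => '/dev/disk/by-id/os-brick+dev+nvme0n1'
#     _device_path_from_symlink('/dev/disk/by-id/os-brick+dev+nvme0n1')
#         => '/dev/nvme0n1'
# Values that don't start with CUSTOM_LINK_PREFIX are returned unchanged.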
def connect_volume_prepare_result(
        func: Callable[[Any, dict], dict]) -> Callable[[Any, dict], dict]:
    """Decorator to prepare the connect_volume result for encrypted volumes.

    WARNING: This decorator must be applied **before** any connect_volume
    locking because it may call disconnect_volume.

    Encryptor drivers expect a symlink that they "own", so that they can
    modify it as they want.

    The current flow is like this:
    - connect_volume connector call
    - libvirt config is generated by Nova using the returned path
    - connect_volume encryptor call => Replaces the original path

    For encrypted volumes the decorator modifies the "path" value in the
    returned dictionary. Unencrypted volumes will be left unchanged.

    There are special connectors that return a file descriptor instead of a
    path depending on the parameters. In those cases the result will also be
    left untouched.

    If a connector relies on the path that has been used, it can use the
    connect_volume_undo_prepare_result decorator to get the value changed
    back to the original path.
    """
    @functools.wraps(func)
    def change_encrypted(self, connection_properties):
        res = func(self, connection_properties)
        # Decode if path is bytes, otherwise leave it as it is
        device_path = convert_str(res['path'])
        # There are connectors that sometimes return file descriptors (rbd)
        if (connection_properties.get('encrypted') and
                isinstance(device_path, str)):
            symlink = _symlink_name_from_device_path(device_path)
            try:
                priv_rootwrap.link_root(os.path.realpath(device_path),
                                        symlink, force=True)
                res['path'] = symlink
            except Exception as exc:
                LOG.debug('Failed to create symlink, cleaning connection: '
                          '%s', exc)
                self.disconnect_volume(res, force=True, ignore_errors=True)
                raise
        return res
    return change_encrypted


def get_dev_path(connection_properties, device_info):
    """Return the device that was returned when connecting a volume."""
    if device_info and device_info.get('path'):
        res = device_info['path']
    else:
        res = connection_properties.get('device_path') or ''

    # Decode if path is bytes, otherwise leave it as it is
    return convert_str(res)


def connect_volume_undo_prepare_result(
        f: Optional[Callable] = None,
        unlink_after: bool = False) -> Callable:
    """Decorator that restores the device path to its original value.

    WARNING: This decorator must be **the first** decorator of the method to
    get the actual method signature during introspection.

    Undo the changes made to the device path of encrypted volumes by the
    connect_volume_prepare_result decorator. That way the connector will
    always get back the same device path that it returned.

    Examples of connector methods that may want to use this are
    disconnect_volume and extend_volume. It can optionally delete the
    symlink on successful completion, which is required for the
    disconnect_volume method:

        @connect_volume_undo_prepare_result(unlink_after=True)
        def disconnect_volume(...):

        @connect_volume_undo_prepare_result
        def extend_volume(...):
    """
    def decorator(func):
        @functools.wraps(func)
        def change_encrypted(*args, **kwargs):
            # May receive only connection_properties or also device_info
            # params
            call_args = inspect.getcallargs(func, *args, **kwargs)
            conn_props = call_args['connection_properties']

            custom_symlink = False
            if conn_props.get('encrypted'):
                dev_info = call_args.get('device_info')
                symlink = get_dev_path(conn_props, dev_info)
                devpath = _device_path_from_symlink(symlink)
                # Symlink can be a file descriptor, which we don't touch,
                # same for old symlinks where the path is the same
                if isinstance(symlink, str) and symlink != devpath:
                    custom_symlink = True
                    # Don't modify the caller's dictionaries
                    call_args['connection_properties'] = conn_props.copy()
                    call_args['connection_properties']['device_path'] = \
                        devpath

                    # Same for the device info dictionary
                    if dev_info:
                        dev_info = call_args['device_info'] = dev_info.copy()
                        dev_info['path'] = devpath

            res = func(**call_args)

            # Clean symlink only when asked (usually on disconnect)
            if custom_symlink and unlink_after:
                try:
                    priv_rootwrap.unlink_root(symlink)
                except Exception:
                    LOG.warning('Failed to remove encrypted custom symlink '
                                '%s', symlink)
            return res
        return change_encrypted

    if f:
        return decorator(f)
    return decorator


def get_device_size(executor: executor.Executor,
                    device: str) -> Optional[int]:
    """Get the size in bytes of a volume."""
    (out, _err) = executor._execute('blockdev', '--getsize64',
                                    device, run_as_root=True,
                                    root_helper=executor._root_helper)
    var = str(out.strip())
    if var.isnumeric():
        return int(var)
    else:
        return None


def check_valid_device(executor: executor.Executor, path: str) -> bool:
    cmd = ('dd', 'if=%(path)s' % {"path": path},
           'of=/dev/null', 'count=1')
    out, info = None, None
    try:
        out, info = executor._execute(*cmd, run_as_root=True,
                                      root_helper=executor._root_helper)
    except processutils.ProcessExecutionError as e:
        LOG.error("Failed to access the device on the path "
                  "%(path)s: %(error)s.",
                  {"path": path, "error": e.stderr})
        return False
    # If the info is none, the path does not exist.
    return info is not None


def get_passphrase_from_secret(key) -> str:
    """Convert an encryption key from the Key Manager into a passphrase.

    If the secret type is 'passphrase', assume that the key is already in a
    suitable string format and simply return it.

    In any other case, assume a binary key that needs to be converted into
    an ASCII representation using binascii.hexlify().

    Cinder uses 'symmetric' in conjunction with binascii.hexlify() to handle
    encryption keys for its own volumes and resulting volume images. Nova
    uses the 'passphrase' type instead for its qcow2+LUKS images which are
    directly passed to LUKS as passphrase input. User-defined Glance images
    may reference secrets of any type (defaulting to 'opaque') which we
    optimistically assume to represent binary keys too (unless their type
    is 'passphrase' explicitly).

    :param key: Key Manager Secret containing the encryption key
    :type key: castellan.common.objects.managed_object.ManagedObject
    :return: passphrase
    :rtype: str
    """
    if key.managed_type() == 'passphrase':
        LOG.debug(
            "os_brick.utils.get_passphrase_from_secret: the secret is of "
            "type passphrase and will be used without conversion"
        )
        return key.get_encoded().decode('utf-8')
    else:
        LOG.debug(
            "os_brick.utils.get_passphrase_from_secret: the secret is not "
            "of type passphrase and will be converted using hex "
            "representation"
        )
        return binascii.hexlify(key.get_encoded()).decode('utf-8')
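
# Illustrative behaviour of get_passphrase_from_secret above; FakeSecret is
# a hypothetical stand-in for castellan's ManagedObject, used only for this
# example:
#     class FakeSecret:
#         def __init__(self, kind, data):
#             self._kind, self._data = kind, data
#         def managed_type(self):
#             return self._kind
#         def get_encoded(self):
#             return self._data
#
#     get_passphrase_from_secret(FakeSecret('passphrase', b'my-pass'))
#         => 'my-pass'  # used verbatim
#     get_passphrase_from_secret(FakeSecret('symmetric', b'\x01\xff'))
#         => '01ff'     # binary key, hex-encoded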
class Anything(object):
    """Object equal to everything."""
    def __eq__(self, other):
        return True

    def __ne__(self, other):
        return False

    def __str__(self):
        return ''

    __lt__ = __gt__ = __le__ = __ge__ = __ne__
    __repr__ = __str__


ANY = Anything()
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/os_brick/version.py0000664000175000017500000000131300000000000017245 0ustar00zuulzuul00000000000000# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import pbr.version

version_info = pbr.version.VersionInfo('os-brick')
__version__ = version_info.version_string()
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1740149696.6597588 os_brick-6.11.0/os_brick.egg-info/0000775000175000017500000000000000000000000016702 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149696.0 os_brick-6.11.0/os_brick.egg-info/PKG-INFO0000644000175000017500000000273400000000000020003 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: os-brick Version: 6.11.0 Summary: OpenStack Cinder brick library for managing local volume attaches Home-page: https://docs.openstack.org/os-brick/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Requires-Python: >=3.9 License-File: LICENSE Requires-Dist: pbr>=5.8.0 Requires-Dist: oslo.concurrency>=5.0.0 Requires-Dist: oslo.config>=9.0.0 Requires-Dist: oslo.context>=4.1.0 Requires-Dist: oslo.log>=4.8.0 Requires-Dist: oslo.i18n>=5.1.0 Requires-Dist: oslo.privsep>=3.0.0 Requires-Dist: oslo.serialization>=4.3.0 Requires-Dist: oslo.service>=2.8.0 Requires-Dist: oslo.utils>=6.0.0 Requires-Dist: requests>=2.25.1 Requires-Dist: 
tenacity>=6.3.1 Requires-Dist: os-win>=5.7.0 Requires-Dist: psutil>=5.7.2 OpenStack Cinder brick library for managing local volume attaches ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149696.0 os_brick-6.11.0/os_brick.egg-info/SOURCES.txt0000664000175000017500000003104400000000000020570 0ustar00zuulzuul00000000000000.coveragerc .mailmap .pylintrc .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst bindep.txt mypy-files.txt pylintrc requirements.txt setup.cfg setup.py test-requirements.txt tox.ini doc/requirements.txt doc/source/conf.py doc/source/index.rst doc/source/contributor/contributing.rst doc/source/install/index.rst doc/source/reference/index.rst doc/source/reference/os_brick/exception.rst doc/source/reference/os_brick/index.rst doc/source/reference/os_brick/initiator/connector.rst doc/source/reference/os_brick/initiator/index.rst doc/source/user/tutorial.rst etc/os-brick/rootwrap.d/os-brick.filters os_brick/__init__.py os_brick/constants.py os_brick/exception.py os_brick/executor.py os_brick/i18n.py os_brick/opts.py os_brick/utils.py os_brick/version.py os_brick.egg-info/PKG-INFO os_brick.egg-info/SOURCES.txt os_brick.egg-info/dependency_links.txt os_brick.egg-info/entry_points.txt os_brick.egg-info/not-zip-safe os_brick.egg-info/pbr.json os_brick.egg-info/requires.txt os_brick.egg-info/top_level.txt os_brick/caches/__init__.py os_brick/caches/opencas.py os_brick/encryptors/__init__.py os_brick/encryptors/base.py os_brick/encryptors/cryptsetup.py os_brick/encryptors/luks.py os_brick/encryptors/nop.py os_brick/initiator/__init__.py os_brick/initiator/connector.py os_brick/initiator/host_driver.py os_brick/initiator/initiator_connector.py os_brick/initiator/linuxfc.py os_brick/initiator/linuxrbd.py os_brick/initiator/linuxscsi.py os_brick/initiator/storpool_utils.py os_brick/initiator/utils.py os_brick/initiator/connectors/__init__.py os_brick/initiator/connectors/base.py os_brick/initiator/connectors/base_iscsi.py os_brick/initiator/connectors/base_rbd.py os_brick/initiator/connectors/fake.py os_brick/initiator/connectors/fibre_channel.py os_brick/initiator/connectors/fibre_channel_ppc64.py os_brick/initiator/connectors/fibre_channel_s390x.py os_brick/initiator/connectors/gpfs.py os_brick/initiator/connectors/huawei.py os_brick/initiator/connectors/iscsi.py os_brick/initiator/connectors/lightos.py os_brick/initiator/connectors/local.py os_brick/initiator/connectors/nvmeof.py os_brick/initiator/connectors/rbd.py os_brick/initiator/connectors/remotefs.py os_brick/initiator/connectors/scaleio.py os_brick/initiator/connectors/storpool.py os_brick/initiator/connectors/vmware.py os_brick/initiator/windows/__init__.py os_brick/initiator/windows/base.py os_brick/initiator/windows/fibre_channel.py os_brick/initiator/windows/iscsi.py os_brick/initiator/windows/rbd.py os_brick/initiator/windows/smbfs.py os_brick/local_dev/__init__.py os_brick/local_dev/lvm.py os_brick/privileged/__init__.py os_brick/privileged/lightos.py os_brick/privileged/nvmeof.py os_brick/privileged/rbd.py os_brick/privileged/rootwrap.py os_brick/privileged/scaleio.py os_brick/remotefs/__init__.py os_brick/remotefs/remotefs.py os_brick/remotefs/windows_remotefs.py os_brick/tests/__init__.py os_brick/tests/base.py os_brick/tests/test_brick.py os_brick/tests/test_exception.py os_brick/tests/test_executor.py os_brick/tests/test_utils.py os_brick/tests/caches/__init__.py os_brick/tests/caches/test_init.py 
os_brick/tests/caches/test_opencas.py os_brick/tests/encryptors/__init__.py os_brick/tests/encryptors/test_base.py os_brick/tests/encryptors/test_cryptsetup.py os_brick/tests/encryptors/test_luks.py os_brick/tests/encryptors/test_nop.py os_brick/tests/initiator/__init__.py os_brick/tests/initiator/test_connector.py os_brick/tests/initiator/test_host_driver.py os_brick/tests/initiator/test_linuxfc.py os_brick/tests/initiator/test_linuxrbd.py os_brick/tests/initiator/test_linuxscsi.py os_brick/tests/initiator/test_storpool_utils.py os_brick/tests/initiator/test_utils.py os_brick/tests/initiator/connectors/__init__.py os_brick/tests/initiator/connectors/test_base_iscsi.py os_brick/tests/initiator/connectors/test_base_rbd.py os_brick/tests/initiator/connectors/test_fibre_channel.py os_brick/tests/initiator/connectors/test_fibre_channel_ppc64.py os_brick/tests/initiator/connectors/test_fibre_channel_s390x.py os_brick/tests/initiator/connectors/test_gpfs.py os_brick/tests/initiator/connectors/test_huawei.py os_brick/tests/initiator/connectors/test_iscsi.py os_brick/tests/initiator/connectors/test_iser.py os_brick/tests/initiator/connectors/test_lightos.py os_brick/tests/initiator/connectors/test_local.py os_brick/tests/initiator/connectors/test_nvmeof.py os_brick/tests/initiator/connectors/test_rbd.py os_brick/tests/initiator/connectors/test_remotefs.py os_brick/tests/initiator/connectors/test_scaleio.py os_brick/tests/initiator/connectors/test_storpool.py os_brick/tests/initiator/connectors/test_vmware.py os_brick/tests/local_dev/__init__.py os_brick/tests/local_dev/fake_lvm.py os_brick/tests/local_dev/test_brick_lvm.py os_brick/tests/privileged/__init__.py os_brick/tests/privileged/test_nvmeof.py os_brick/tests/privileged/test_rbd.py os_brick/tests/privileged/test_rootwrap.py os_brick/tests/remotefs/__init__.py os_brick/tests/remotefs/test_remotefs.py os_brick/tests/remotefs/test_windows_remotefs.py os_brick/tests/windows/__init__.py os_brick/tests/windows/fake_win_conn.py os_brick/tests/windows/test_base.py os_brick/tests/windows/test_base_connector.py os_brick/tests/windows/test_factory.py os_brick/tests/windows/test_fibre_channel.py os_brick/tests/windows/test_iscsi.py os_brick/tests/windows/test_rbd.py os_brick/tests/windows/test_smbfs.py releasenotes/notes/add-luks2-support-13563cfe83aba69c.yaml releasenotes/notes/add-vstorage-protocol-b536f4e21d764801.yaml releasenotes/notes/add-windows-fibre-channel-030c095c149da321.yaml releasenotes/notes/add-windows-iscsi-15d6b1392695f978.yaml releasenotes/notes/add-windows-smbfs-d86edaa003130a31.yaml releasenotes/notes/add_custom_keyring_for_rbd_connection-eccbaae9ee5f3491.yaml releasenotes/notes/bp-lightbits-lightos-clustered-nvmetcp-connector-fd8dfd73330973e9.yaml releasenotes/notes/bug-1609753-16eace7f2b48d805.yaml releasenotes/notes/bug-1722432-2408dab55c903c5b.yaml releasenotes/notes/bug-1823200-scaleio-upgrade-3e83b5c9dd148714.yaml releasenotes/notes/bug-1823200-victoria-b414a1806cba3998.yaml releasenotes/notes/bug-1862443-e87ef38b60f9b979.yaml releasenotes/notes/bug-1865754-ceph-octopus-compatibility-0aa9b8bc1b028301.yaml releasenotes/notes/bug-1884052-798094496dccf23c.yaml releasenotes/notes/bug-1888675-mpath-resize-6013ce39fa2b8401.yaml releasenotes/notes/bug-1915678-901a6bd24ecede72.yaml releasenotes/notes/bug-1924652-2323f905f62ef8ba.yaml releasenotes/notes/bug-1929223-powerflex-connector-certificate-validation-cf9ffc98391115d5.yaml releasenotes/notes/bug-1938870-af85c420d1a108a9.yaml releasenotes/notes/bug-1944474-55c5ebb3a37801aa.yaml 
releasenotes/notes/bug-1945323-4140f5aff3558082.yaml releasenotes/notes/bug-2004630-e94616509a51258c.yaml releasenotes/notes/bug-2013749-3de9f827b82116a2.yaml releasenotes/notes/bug-2033292-storpool-migration-wrong-volume-72ec2f18e39a027a.yaml releasenotes/notes/bug-2034685-dell-powerflex-scaleio-disconnect-volume-c69017cc120306df.yaml releasenotes/notes/bug-nvmeof-connector-support-multipath-kernels-ff6f1f27fdea2c8e.yaml releasenotes/notes/default-timeout-26c838af8b7af9fc.yaml releasenotes/notes/delay-legacy-encryption-provider-name-deprecation-c0d07be3f0d92afd.yaml releasenotes/notes/dell-powerflex-bug-2046810-c16ba2bd8dde06d4.yaml releasenotes/notes/deprecate-plain-cryptsetup-encryptor-0a279abc0b0d718c.yaml releasenotes/notes/deprecate-windows-support-bdc643525e9bb132.yaml releasenotes/notes/disconnect-multipath-cfg-changed-637abc5ecf44fb10.yaml releasenotes/notes/drop-py2-7dcde3ccd0e167b0.yaml releasenotes/notes/drop-python-3-6-and-3-7-1e7190189d415492.yaml releasenotes/notes/encryption-a642889a82ff9207.yaml releasenotes/notes/extend-encrypted-in-use-ac3f7a1994ec3a38.yaml releasenotes/notes/external-locks-9f015988ebdc37d6.yaml releasenotes/notes/fc-always-check-single-wwnn-1595689da0eb673b.yaml releasenotes/notes/fc-flush-single-path-22ed6cc7b56a6d9b.yaml releasenotes/notes/fc-force-disconnect-1a33cf46c233dd04.yaml releasenotes/notes/fc-remote-ports-exception-handling-0c78d46bcfaddce1.yaml releasenotes/notes/fix-extend-multipath-a308d333061665fd.yaml releasenotes/notes/fix-fc-scan-too-broad-3c576e1846b7f05f.yaml releasenotes/notes/fix-fc-scanning-9164da9eb42aaed0.yaml releasenotes/notes/fix-generate-hostnqn-in-case-old-nvmecli.yaml releasenotes/notes/fix-host-uuid-warning-3814b7e47bde8010.yaml releasenotes/notes/fix-iscsi-force-disconnect-2cae1d629191c3cc.yaml releasenotes/notes/fix-multipath-disconnect-819d01e6e981883e.yaml releasenotes/notes/fix-multipath-wait-for-fc-2404c4de2b277267.yaml releasenotes/notes/fix-multipathd-dependency-nvmeof-d61187f1ab3808c2.yaml releasenotes/notes/fix-nvme-issues-8dfc15cb691389fe.yaml releasenotes/notes/improve-get_sysfs_wwn-df38ea88cdcdcc94.yaml releasenotes/notes/improve-iscsi-multipath-detection-f36f28a993f61936.yaml releasenotes/notes/introduce-encryption-provider-constants-a7cd0ce58da2bae8.yaml releasenotes/notes/iscsi_manual_scan_support-d64a1c3c8e1986b4.yaml releasenotes/notes/local-attach-in-rbd-connector-c06347fb164b084a.yaml releasenotes/notes/lock_path-c1c58a253391b41c.yaml releasenotes/notes/lvm-delete-error-76f2cc9d8dc91f01.yaml releasenotes/notes/multipath-improvements-596c2c6eadfba6ea.yaml releasenotes/notes/multipath-nvme-f77a53eb2717a44c.yaml releasenotes/notes/no-systool-use-b7bc430de1033670.yaml releasenotes/notes/notice-37378e268b52ed89.yaml releasenotes/notes/nvme-flush-f31ab337224e5d3d.yaml releasenotes/notes/nvme-hostnqn-c2611dc56729183b.yaml releasenotes/notes/nvme-rsd-support-d487afd77c534fa1.yaml releasenotes/notes/nvmeof-connecting-788f77a42fe7dd3b.yaml releasenotes/notes/nvmeof-consolidate-004dbe3a98f6f815.yaml releasenotes/notes/nvmeof-create-hostid-15bf84ec00726fad.yaml releasenotes/notes/nvmeof-disconnect-83f9aaf17f8c8988.yaml releasenotes/notes/nvmeof-faster-create-hostnqn-81a63844142858bf.yaml releasenotes/notes/nvmeof-findmnt-args-b2c966af83bd3bf3.yaml releasenotes/notes/nvmeof-hide-traceback-a968ab71352684e3.yaml releasenotes/notes/nvmeof-multiple-volumes-within-subsystem-support-05879c1c3bdf52c9.yaml releasenotes/notes/nvmeof-new-address-56044523cf8fc203.yaml 
releasenotes/notes/nvmeof-old-shownqn-c8cc2820b9c1418e.yaml releasenotes/notes/nvmeof-support-v2-0d3a423c26eee003.yaml releasenotes/notes/privsep-logs-9e938e5a2aee042e.yaml releasenotes/notes/rbd-disconnect-failure-9efa6932df40271b.yaml releasenotes/notes/rbd-non-openstack-support-28ee093d7d3a700e.yaml releasenotes/notes/rbd-windows-support-ef6e8184842409dd.yaml releasenotes/notes/rbd_check_valid_device-2f50c0639adb8e7c.yaml releasenotes/notes/rbd_extend_volume-5bc6adc08f662c5b.yaml releasenotes/notes/refactor_iscsi_connect-dfbb24305a954783.yaml releasenotes/notes/refactor_iscsi_disconnect-557f4173bc1ae4ed.yaml releasenotes/notes/remove-aoe-7a97315a73c7b24f.yaml releasenotes/notes/remove-bug-1633518-workaround-75c2e26843660696.yaml releasenotes/notes/remove-disco-0809537ffb8c50eb.yaml releasenotes/notes/remove-drbd-21872230fcac1138.yaml releasenotes/notes/remove-hgst-daa7f07c307974d0.yaml releasenotes/notes/remove-hyperscale-468f1b61bf4dadf8.yaml releasenotes/notes/remove-old-constants-20021f5b30bde890.yaml releasenotes/notes/remove-sheepdog-611257b28bc88934.yaml releasenotes/notes/scaleio-extend-attached-ec44d3a72395882c.yaml releasenotes/notes/scsi-addressing-modes-7674ea30d4ff4c49.yaml releasenotes/notes/start-using-reno-23e8d5f1a30851a1.yaml releasenotes/notes/storpool-move-api-and-config-code-in-tree-62f41ec44a8a7b7d.yaml releasenotes/notes/unsupported-lvm-versions-cbaeabce3ace1805.yaml releasenotes/notes/update-nvmeof-connector-6260a658c15a9a6e.yaml releasenotes/notes/ussuri-release-979d709dfa7df068.yaml releasenotes/notes/veritas-hyperscale-connector-fe56cec68b1947cd.yaml releasenotes/notes/vmware-vmdk-connector-19e6999e6cae43cd.yaml releasenotes/notes/wait-mpath-io-703605e74ee009ef.yaml releasenotes/notes/yoga-known-issues-f1248af0e328d63e.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst releasenotes/source/2024.1.rst releasenotes/source/2024.2.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder tools/coding-checks.sh tools/fast8.sh tools/generate_connector_list.py tools/lintstack.py tools/lintstack.sh tools/mypywrap.sh././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149696.0 os_brick-6.11.0/os_brick.egg-info/dependency_links.txt0000664000175000017500000000000100000000000022750 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149696.0 os_brick-6.11.0/os_brick.egg-info/entry_points.txt0000664000175000017500000000017100000000000022177 0ustar00zuulzuul00000000000000[oslo.config.opts] os_brick = os_brick.opts:list_opts [oslo.config.opts.defaults] os_brick = os_brick.opts:set_defaults ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149696.0 os_brick-6.11.0/os_brick.egg-info/not-zip-safe0000664000175000017500000000000100000000000021130 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1740149696.0 os_brick-6.11.0/os_brick.egg-info/pbr.json0000664000175000017500000000005600000000000020361 0ustar00zuulzuul00000000000000{"git_version": "6e83ac6", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149696.0 os_brick-6.11.0/os_brick.egg-info/requires.txt0000664000175000017500000000037400000000000021306 0ustar00zuulzuul00000000000000pbr>=5.8.0 oslo.concurrency>=5.0.0 oslo.config>=9.0.0 oslo.context>=4.1.0 oslo.log>=4.8.0 oslo.i18n>=5.1.0 oslo.privsep>=3.0.0 oslo.serialization>=4.3.0 oslo.service>=2.8.0 oslo.utils>=6.0.0 requests>=2.25.1 tenacity>=6.3.1 os-win>=5.7.0 psutil>=5.7.2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149696.0 os_brick-6.11.0/os_brick.egg-info/top_level.txt0000664000175000017500000000001100000000000021424 0ustar00zuulzuul00000000000000os_brick ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/pylintrc0000664000175000017500000000165600000000000015214 0ustar00zuulzuul00000000000000# The format of this file isn't really documented; just use --generate-rcfile [Messages Control] # NOTE(justinsb): We might want to have a 2nd strict pylintrc in future # C0111: Don't require docstrings on every method # W0511: TODOs in code comments are fine. # W0142: *args and **kwargs are fine. # W0622: Redefining id is fine. disable=C0111,W0511,W0142,W0622 [Basic] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long # and be lowercased with underscores method-rgx=([a-z_][a-z0-9_]{2,50}|setUp|tearDown)$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [Design] max-public-methods=100 min-public-methods=0 max-args=6 [Variables] dummy-variables-rgx=_ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1740149696.5877593 os_brick-6.11.0/releasenotes/0000775000175000017500000000000000000000000016106 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1740149696.651759 os_brick-6.11.0/releasenotes/notes/0000775000175000017500000000000000000000000017236 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/add-luks2-support-13563cfe83aba69c.yaml0000664000175000017500000000033600000000000025722 0ustar00zuulzuul00000000000000--- features: - | A LUKS2 encryptor has been introduced providing support for this latest version of the Linux Unified Key Setup disk encryption format. This requires ``cryptsetup`` version 2.0.0 or greater. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/add-vstorage-protocol-b536f4e21d764801.yaml0000664000175000017500000000011400000000000026412 0ustar00zuulzuul00000000000000--- features: - Added vStorage protocol support for RemoteFS connections. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/add-windows-fibre-channel-030c095c149da321.yaml0000664000175000017500000000007700000000000027076 0ustar00zuulzuul00000000000000--- features: - Add Windows Fibre Channel connector support.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/add-windows-iscsi-15d6b1392695f978.yaml0000664000175000017500000000006700000000000025475 0ustar00zuulzuul00000000000000--- features: - Add Windows iSCSI connector support.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/add-windows-smbfs-d86edaa003130a31.yaml0000664000175000017500000000006700000000000025643 0ustar00zuulzuul00000000000000--- features: - Add Windows SMBFS connector support.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/add_custom_keyring_for_rbd_connection-eccbaae9ee5f3491.yaml0000664000175000017500000000022600000000000032430 0ustar00zuulzuul00000000000000--- fixes: - Add support to use custom Ceph keyring files (previously os-brick hardcoded using /etc/ceph/.client..keyring file).
././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=os_brick-6.11.0/releasenotes/notes/bp-lightbits-lightos-clustered-nvmetcp-connector-fd8dfd73330973e9.yaml 22 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bp-lightbits-lightos-clustered-nvmetcp-connector-fd8dfd73330973e90000664000175000017500000000063500000000000033114 0ustar00zuulzuul00000000000000--- features: - | Lightbits LightOS connector: new os-brick connector for Lightbits(TM) LightOS(R). Lightbits Labs (https://www.lightbitslabs.com) LightOS is software-defined, cloud native, high-performance, clustered scale-out and redundant NVMe/TCP storage that performs like local NVMe flash. This connector requires the Lightbits discovery-client, available from Lightbits Labs.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-1609753-16eace7f2b48d805.yaml0000664000175000017500000000023600000000000024034 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1609753 `_: Fixed resizing a multipath device when user friendly names are ON.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-1722432-2408dab55c903c5b.yaml0000664000175000017500000000046300000000000023731 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1722432 `_] Changes the supported_transports to support the tcp transport. With this change, we can define a custom iface with tcp transport to limit the storage traffic so that it is only transmitted via the storage NIC we specified.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-1823200-scaleio-upgrade-3e83b5c9dd148714.yaml0000664000175000017500000000101000000000000026712 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1823200 `_: Prior fixes for this bug changed the connection properties but did not take into account an upgrade scenario in which currently attached volumes had the old format connection properties and could fail on detach with "KeyError: 'config_group'". This release updates the 'scaleio' connector to handle this situation. It is only applicable to deployments using a Dell EMC PowerFlex/VxFlex OS/ScaleIO backend.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-1823200-victoria-b414a1806cba3998.yaml0000664000175000017500000000272400000000000025472 0ustar00zuulzuul00000000000000--- security: - | Dell EMC VxFlex OS driver: This release contains a fix for `Bug #1823200 `_. See `OSSN-0086 `_ for details. upgrade: - | The fix for `Bug #1823200 `_ requires that a configuration file be deployed on compute nodes, cinder nodes, and anywhere you would perform a volume attachment in your deployment, when using Cinder with a Dell EMC VxFlex OS backend. See the `Dell EMC VxFlex OS (ScaleIO) Storage driver `_ documentation for details about this configuration file. fixes: - | `Bug #1823200 `_: This release contains an updated connector for use with the Dell EMC VxFlex OS backend. It requires that a configuration file be deployed on compute nodes, cinder nodes, and anywhere you would perform a volume attachment in your deployment. See the `Dell EMC VxFlex OS (ScaleIO) Storage driver `_ documentation for details about the configuration file, and see `OSSN-0086 `_ for more information about the security vulnerability.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-1862443-e87ef38b60f9b979.yaml0000664000175000017500000000024700000000000024004 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1862443 `_] Fix an issue where the platform id is needed to determine the name of the SCSI disk.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-1865754-ceph-octopus-compatibility-0aa9b8bc1b028301.yaml0000664000175000017500000000115400000000000031207 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1865754 `_: the ``RBDConnector`` class generates a temporary configuration file to connect to Ceph. Previously, os-brick did not include a ``[global]`` section to contain the options it sets, but with the Octopus release (15.2.0+), Ceph has begun enforcing the presence of this section marker, which dates back at least to the Hammer release of Ceph. With this release, os-brick includes the ``[global]`` section in the generated configuration file, which should be backward-compatible at least to Ceph Hammer.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-1884052-798094496dccf23c.yaml0000664000175000017500000000020400000000000023701 0ustar00zuulzuul00000000000000--- fixes: - | Fix an incompatibility with ceph 13.2.0 (Mimic) or later, caused by a change in the output of ``rbd map``.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-1888675-mpath-resize-6013ce39fa2b8401.yaml0000664000175000017500000000024600000000000026302 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1888675 `_: Fixed in-use volume resize issues caused by the multipath-tools version.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-1915678-901a6bd24ecede72.yaml0000664000175000017500000000040400000000000024110 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1915678 `_: Fix an unhandled exception during iSCSI volume attachment with multipath enabled that resulted in the cinder-volume service becoming stuck and requiring a restart.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-1924652-2323f905f62ef8ba.yaml0000664000175000017500000000057400000000000023756 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1924652 `_: Fix issue with newer multipathd implementations where path devices are kept in multipathd even after volume detachment completes, preventing it from creating a multipath device when a new device attachment is made shortly afterwards with the same volume device or the same device path.
././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=os_brick-6.11.0/releasenotes/notes/bug-1929223-powerflex-connector-certificate-validation-cf9ffc98391115d5.yaml 22 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-1929223-powerflex-connector-certificate-validation-cf9ffc98390000664000175000017500000000024500000000000032674 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1929223 `_: Fixed HTTPS certificate validation being disabled in the PowerFlex connector.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-1938870-af85c420d1a108a9.yaml0000664000175000017500000000025300000000000023743 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1938870 `_: Fixed a KumoScale driver issue where a replicated volume with missing portals would attach without RAID.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-1944474-55c5ebb3a37801aa.yaml0000664000175000017500000000033000000000000024012 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1944474 `_: Fixed missing retries to reinitiate iSCSI connections with high concurrency of connections and with multipath enabled.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-1945323-4140f5aff3558082.yaml0000664000175000017500000000035400000000000023606 0ustar00zuulzuul00000000000000--- fixes: - | NVMe-oF connector `bug #1945323 `_: Fixed a regression where connect_volume returned a list with a single path instead of just the path.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-2004630-e94616509a51258c.yaml0000664000175000017500000000045300000000000023520 0ustar00zuulzuul00000000000000--- fixes: - | PowerFlex driver `bug #2004630 `_: Fixed a regression which caused the PowerFlex (ScaleIO) connector to log in multiple times, even with an existing valid token.
The ScaleIO connector now caches and reuses the token.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-2013749-3de9f827b82116a2.yaml0000664000175000017500000000047400000000000023673 0ustar00zuulzuul00000000000000--- fixes: - | PowerFlex driver `Bug #2013749 `_: PowerFlex 4.x returns a new error code which was causing this failure. Added handling for the new error code returned from PowerFlex 4.x when we try to map a volume which is already mapped.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-2033292-storpool-migration-wrong-volume-72ec2f18e39a027a.yaml0000664000175000017500000000027700000000000032247 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2033292 `_: Fixed possible attachment of the wrong Cinder volume after a successful migration of a live VM.
././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=os_brick-6.11.0/releasenotes/notes/bug-2034685-dell-powerflex-scaleio-disconnect-volume-c69017cc120306df.yaml 22 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-2034685-dell-powerflex-scaleio-disconnect-volume-c69017cc120300000664000175000017500000000041300000000000032230 0ustar00zuulzuul00000000000000--- fixes: - | Dell PowerFlex driver `Bug #2034685 `_: Added a retry mechanism to check if the disconnected device is actually removed from the host, ensuring that subsequent connections succeed.
././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=os_brick-6.11.0/releasenotes/notes/bug-nvmeof-connector-support-multipath-kernels-ff6f1f27fdea2c8e.yaml 22 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/bug-nvmeof-connector-support-multipath-kernels-ff6f1f27fdea2c8e.y0000664000175000017500000000041100000000000033370 0ustar00zuulzuul00000000000000--- fixes: - | NVMe-oF connector `bug #1943615 `_: Fixed get nvme device failing on kernels with multipath enabled by using the generic form ``/sys/class/block/n*`` for finding nvme devices.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/default-timeout-26c838af8b7af9fc.yaml0000664000175000017500000000024200000000000025624 0ustar00zuulzuul00000000000000--- fixes: - | Add a 10 minute default timeout to shell commands executed through subprocess to prevent the possibility of them getting stuck forever.
././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=os_brick-6.11.0/releasenotes/notes/delay-legacy-encryption-provider-name-deprecation-c0d07be3f0d92afd.yaml 22 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/delay-legacy-encryption-provider-name-deprecation-c0d07be3f0d92af0000664000175000017500000000056200000000000033236 0ustar00zuulzuul00000000000000--- deprecations: - | The direct use of the encryption provider classes such as os_brick.encryptors.luks.LuksEncryptor continues to be deprecated and will now be blocked in the Queens release of os-brick. The use of out of tree encryption provider classes also continues to be deprecated and will also be blocked in the Queens release of os-brick.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/dell-powerflex-bug-2046810-c16ba2bd8dde06d4.yaml0000664000175000017500000000055100000000000027076 0ustar00zuulzuul00000000000000--- fixes: - | Dell PowerFlex driver `Bug #2046810 `_: PowerFlex 4.x returns a new error code which was causing this failure. Added handling for the new error code returned from PowerFlex v4.x, allowing the driver to ignore and skip a disconnect operation when the volume is not mapped.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/deprecate-plain-cryptsetup-encryptor-0a279abc0b0d718c.yaml0000664000175000017500000000044000000000000031677 0ustar00zuulzuul00000000000000--- deprecations: - | The plain CryptsetupEncryptor is deprecated and will be removed in a future release. Existing users are encouraged to retype any existing volumes using this encryptor to the luks LuksEncryptor or luks2 Luks2Encryptor encryptors as soon as possible.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/deprecate-windows-support-bdc643525e9bb132.yaml0000664000175000017500000000021500000000000027465 0ustar00zuulzuul00000000000000--- deprecations: - | Support for Windows operating systems has been deprecated, because of the retirement of the Winstackers project.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/disconnect-multipath-cfg-changed-637abc5ecf44fb10.yaml0000664000175000017500000000033500000000000030754 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1921381 `_: Fix disconnecting volumes when the use_multipath value is changed from the connect_volume call to the disconnect_volume call.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/drop-py2-7dcde3ccd0e167b0.yaml0000664000175000017500000000025600000000000024227 0ustar00zuulzuul00000000000000--- upgrade: - | Python 2.7 support has been dropped. Beginning with os-brick release 3.0.0, the minimum version of Python supported by os-brick is Python 3.6.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/drop-python-3-6-and-3-7-1e7190189d415492.yaml0000664000175000017500000000020100000000000025670 0ustar00zuulzuul00000000000000--- upgrade: - | Python 3.6 & 3.7 support has been dropped. The minimum version of Python now supported is Python 3.8.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/encryption-a642889a82ff9207.yaml0000664000175000017500000000076200000000000024415 0ustar00zuulzuul00000000000000--- fixes: - | NVMe-oF connector `bug #1964379 `_: Fixed using non-LUKS v1 encrypted volumes, as once one such volume is disconnected from a host all successive NVMe-oF attachments would fail. - | `Bug #1967790 `_: Fixed encryptor's connect_volume returning a symlink that points to the raw block device instead of to the decrypted device mapper device.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/extend-encrypted-in-use-ac3f7a1994ec3a38.yaml0000664000175000017500000000043400000000000027103 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1967157 `_: Fixed extending LUKS and LUKSv2 host attached encrypted volumes. Only LUKS v1 volumes decrypted via libvirt were working, but now all LUKS based in-use encrypted volumes can be extended.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/external-locks-9f015988ebdc37d6.yaml0000664000175000017500000000072300000000000025314 0ustar00zuulzuul00000000000000--- upgrade: - | Services using os-brick need to set the ``lock_path`` configuration option in their ``[oslo_concurrency]`` section since it doesn't have a valid default (related `bug #1947370 `_). fixes: - | `Bug #1947370 `_: Fixed race conditions on iSCSI with shared targets and NVMe ``connect_volume`` and ``disconnect_volume`` calls.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/fc-always-check-single-wwnn-1595689da0eb673b.yaml0000664000175000017500000000075300000000000027504 0ustar00zuulzuul00000000000000--- fixes: - | Always check if we are dealing with a single WWNN Fibre Channel target, even when we receive an initiator_target_map. This allows us to exclude unconnected HBAs from our scan for storage arrays that automatically connect all target ports (due to their architecture and design) even if the Cinder driver returns the initiator_target_map, provided the target has a single WWNN. Excluding these HBAs prevents undesired volumes from being connected.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/fc-flush-single-path-22ed6cc7b56a6d9b.yaml0000664000175000017500000000034200000000000026421 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1897787 `_: Fix Fibre Channel not flushing volumes on detach when a multipath connection was requested on their attach, but one was not found.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/fc-force-disconnect-1a33cf46c233dd04.yaml0000664000175000017500000000020700000000000026133 0ustar00zuulzuul00000000000000--- features: - | FC connector: Added support for the ``force`` and ``ignore_errors`` parameters on the ``disconnect_volume`` method.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/fc-remote-ports-exception-handling-0c78d46bcfaddce1.yaml0000664000175000017500000000054300000000000031442 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2098954 `_: Fibre Channel: Fixed an issue with partial scanning in the case of FC connections. This is addressed by exception handling when we don't get a valid path in ``fc_transport`` for a target WWPN, leading to no search being performed in the ``fc_remote_ports`` path.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/fix-extend-multipath-a308d333061665fd.yaml0000664000175000017500000000057500000000000026261 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2032177 `_: Handled the issue when one or more paths of a multipath device are down and we try to extend the device. This is addressed by adding an additional check to verify that all devices are up before extending the multipath device to avoid leaving devices with inconsistent sizes.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/fix-fc-scan-too-broad-3c576e1846b7f05f.yaml0000664000175000017500000000034500000000000026255 0ustar00zuulzuul00000000000000--- fixes: - | Fix an issue where SCSI LUN scans for FC were unnecessarily broad. Now OS-Brick will not use wildcards unless it doesn't find any target ports in sysfs and the Cinder driver doesn't disable them.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/fix-fc-scanning-9164da9eb42aaed0.yaml0000664000175000017500000000042500000000000025444 0ustar00zuulzuul00000000000000--- fixes: - | FC connector `bug #2051237 `_: Fixed an issue with the fibre channel connector scanning partial targets. We search for target information in sysfs, first in fc_transport and then in fc_remote_ports.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/fix-generate-hostnqn-in-case-old-nvmecli.yaml0000664000175000017500000000054300000000000027716 0ustar00zuulzuul00000000000000--- fixes: - | The nvmeof connector relies on nvme-cli to query the host-nqn. Versions of nvme-cli < 1.10 do not have the 'show-hostnqn' command, which could cause the connector to fail to generate the hostnqn. Fixed the connector to either get or generate the hostnqn with versions of nvme-cli<1.10 that do not have 'show-hostnqn'.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/fix-host-uuid-warning-3814b7e47bde8010.yaml0000664000175000017500000000013400000000000026422 0ustar00zuulzuul00000000000000--- fixes: - | Fixes a warning seen when running in a container using ``overlayfs``.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/fix-iscsi-force-disconnect-2cae1d629191c3cc.yaml0000664000175000017500000000035300000000000027531 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2012251 `_: Fixed an issue when disconnecting an iSCSI volume when ``force`` and ``ignore_errors`` are set to ``True`` and flushing the multipath device fails.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/fix-multipath-disconnect-819d01e6e981883e.yaml0000664000175000017500000000060500000000000027143 0ustar00zuulzuul00000000000000--- fixes: - | Under certain conditions detaching a multipath device may result in failure when flushing one of the individual paths, but the disconnect should have succeeded, because there were other paths available to flush all the data. The multipath disconnect mechanism is now more robust and will only fail when disconnecting if multipath would lose data.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/fix-multipath-wait-for-fc-2404c4de2b277267.yaml0000664000175000017500000000030200000000000027076 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2097388 `_: Fibre Channel: Fixed an issue while waiting for a multipath device when it is not populated in sysfs.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/fix-multipathd-dependency-nvmeof-d61187f1ab3808c2.yaml0000664000175000017500000000046200000000000030621 0ustar00zuulzuul00000000000000--- fixes: - | NVMe-oF connector `bug #2085013 `_: Removed the ``multipathd`` dependency for NVMe-oF connections. Previously, multipathd had to be installed to leverage native NVMe multipathing (ANA), but now that dependency is removed.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/fix-nvme-issues-8dfc15cb691389fe.yaml0000664000175000017500000000054500000000000025504 0ustar00zuulzuul00000000000000--- fixes: - | NVMe-oF connector `bug #1929074 `_: Fixed an issue with nvme logging an error trace when the nvme CLI is not present. - | NVMe-oF connector `bug #1929075 `_: Fixed an issue with the nvme connector creating the /etc/nvme directory when it is not present.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/improve-get_sysfs_wwn-df38ea88cdcdcc94.yaml0000664000175000017500000000014400000000000027232 0ustar00zuulzuul00000000000000--- fixes: - | Improve WWN detection for arrays with multiple designators. (bug 1881608).
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/improve-iscsi-multipath-detection-f36f28a993f61936.yaml0000664000175000017500000000021000000000000030776 0ustar00zuulzuul00000000000000--- fixes: - | Improve iSCSI multipath detection to work even if we cannot find the volume's WWN in sysfs. (bug 1881619).
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/introduce-encryption-provider-constants-a7cd0ce58da2bae8.yaml0000664000175000017500000000121400000000000032656 0ustar00zuulzuul00000000000000--- features: - | Encryption provider constants have been introduced detailing the supported encryption formats such as LUKS along with their associated in-tree provider implementations. These constants should now be used to identify an encryption provider implementation for a given encryption format. deprecations: - | The direct use of the encryption provider classes such as os_brick.encryptors.luks.LuksEncryptor is now deprecated and will be blocked in the Pike release of os-brick. The use of out of tree encryption provider classes is also deprecated and will be blocked in the Pike release of os-brick.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/iscsi_manual_scan_support-d64a1c3c8e1986b4.yaml0000664000175000017500000000062400000000000027614 0ustar00zuulzuul00000000000000--- features: - | Support for setting the scan mode on the Open-iSCSI initiator. If the installed iscsiadm supports this feature, OS-Brick will set all its new sessions to manual scan.
fixes: - | On systems with scan mode support on open-iSCSI we'll no longer see unwanted devices polluting our system due to the automatic initiator scan or to AEN/AER messages from the backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/local-attach-in-rbd-connector-c06347fb164b084a.yaml0000664000175000017500000000021500000000000027743 0ustar00zuulzuul00000000000000--- features: - Local attach feature in RBD connector. We use RBD kernel module to attach and detach volumes locally without Nova. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/lock_path-c1c58a253391b41c.yaml0000664000175000017500000000141100000000000024206 0ustar00zuulzuul00000000000000--- features: - | Specific location for os-brick file locks using the ``lock_path`` configuration option in the ``os_brick`` configuration group. Previously, os-brick used the consuming service's lock_path for its locks, but there are some deployment configurations (for example, Nova and Cinder collocated on the same host) where this would result in anomalous behavior. Default is to use the consuming service's lock_path. This change requires a consuming service to call the ``os_brick.setup`` method after service configuration options have been called. upgrade: - | To use the os-brick specific file lock location introduced in this release, an external service using the library must call the ``os_brick.setup`` method. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/lvm-delete-error-76f2cc9d8dc91f01.yaml0000664000175000017500000000031100000000000025607 0ustar00zuulzuul00000000000000--- fixes: - | LVM driver `bug #1901783 `_: Fix unexpected delete volume failure due to unexpected exit code 139 on ``lvs`` command call. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/multipath-improvements-596c2c6eadfba6ea.yaml0000664000175000017500000000006300000000000027374 0ustar00zuulzuul00000000000000--- fixes: - Improved multipath device handling. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/multipath-nvme-f77a53eb2717a44c.yaml0000664000175000017500000000034100000000000025310 0ustar00zuulzuul00000000000000--- features: - | Phase 1 (Native) implementation of NVMeoF Multipathing. See the `NVMeoF Multipathing `_ spec for details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/no-systool-use-b7bc430de1033670.yaml0000664000175000017500000000021000000000000025152 0ustar00zuulzuul00000000000000--- upgrade: - | No longer using ``systool`` to gather FC HBA information, so the ``sysfsutils`` package is no longer needed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1740149615.0 os_brick-6.11.0/releasenotes/notes/notice-37378e268b52ed89.yaml0000664000175000017500000000037400000000000023510 0ustar00zuulzuul00000000000000--- features: - | Extend the StorPool API client to support the StorPool iSCSI configuration API functionality. upgrade: - | Upgrading to this release is not required; it is an interim version affecting only the StorPool connector. 
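A minimal sketch of the ``os_brick.setup`` call required by the ``lock_path`` release note above, assuming an oslo.config based consuming service (the project name and option parsing shown here are illustrative)::

    from oslo_config import cfg

    import os_brick

    CONF = cfg.CONF

    # Parse the service's own configuration options first...
    CONF(project='cinder')
    # ...then let os-brick resolve its specific file lock location.
    os_brick.setup(CONF)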
os_brick-6.11.0/releasenotes/notes/nvme-flush-f31ab337224e5d3d.yaml
---
fixes:
  - |
    NVMe-oF connector bug #1903032: Fixed not flushing the device on
    disconnect.

os_brick-6.11.0/releasenotes/notes/nvme-hostnqn-c2611dc56729183b.yaml
---
fixes:
  - |
    NVMe-oF connector bug #1928944: Fixed not returning the right nqn value
    from ``get_connector_properties`` when ``/etc/nvme/hostnqn`` doesn't
    exist (see the sketch below).

os_brick-6.11.0/releasenotes/notes/nvme-rsd-support-d487afd77c534fa1.yaml
---
features:
  - |
    Extended the nvme connector to support RSD with NVMe-oF.

os_brick-6.11.0/releasenotes/notes/nvmeof-connecting-788f77a42fe7dd3b.yaml
---
fixes:
  - |
    NVMe-oF connector bug #2035695: Fixed attaching volumes when all
    portals of a subsystem are reconnecting at the time of the request.

os_brick-6.11.0/releasenotes/notes/nvmeof-consolidate-004dbe3a98f6f815.yaml
---
fixes:
  - |
    NVMe-oF connector bug #1964395: Fixed dependence on a specific nvme CLI
    version for proper detection of devices when attaching a volume.
  - |
    NVMe-oF connector bug #1964388: Fixed a corner case where the connector
    could return the wrong path for a volume, resulting in Nova attaching
    the wrong volume to an instance, Cinder destroying volume data, and
    other similarly dangerous scenarios.
  - |
    NVMe-oF connector bug #1964385: Fixed the disappearance of
    volumes/devices from the host, with potential loss of unflushed data,
    when network issues last longer than 10 minutes.
  - |
    NVMe-oF connector bug #1964380: Fixed support for the newer nvme CLI
    exit code when trying to connect to an already connected
    subsystem-portal.
  - |
    NVMe-oF connector bug #1964383: Fixed not being able to attach a volume
    if there was already a controller for the subsystem.
  - |
    NVMe-oF connector bug #1965954: Fixed extend of in-use replicated
    volumes with a single replica not growing the RAID.
  - |
    NVMe-oF connector bug #1964590: Fixed extend failure of in-use volumes
    with some Cinder drivers.
  - |
    NVMe-oF connector bug #1903032: Fixed not flushing single-connection
    volumes on some Cinder drivers.

os_brick-6.11.0/releasenotes/notes/nvmeof-create-hostid-15bf84ec00726fad.yaml
---
fixes:
  - |
    NVMe-oF bug #2016029: The NVMe-oF connector now creates
    ``/etc/nvme/hostid`` when it's missing from the system. That way the
    Host ID will persist and always be the same, instead of being randomly
    generated.
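To put the ``get_connector_properties`` fix above in context, here is a minimal sketch of how a consuming service gathers the host's initiator-side properties; the argument values shown are illustrative:

```python
from os_brick.initiator import connector

# With the bug #1928944 fix, the returned 'nqn' entry is correct even
# when /etc/nvme/hostnqn does not exist yet.
props = connector.get_connector_properties(
    root_helper='sudo',
    my_ip='192.0.2.5',       # illustrative host IP
    multipath=True,
    enforce_multipath=False)
print(props.get('nqn'))
```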
os_brick-6.11.0/releasenotes/notes/nvmeof-disconnect-83f9aaf17f8c8988.yaml
---
fixes:
  - |
    NVMe-oF connector bug #1961102: Fixed leaving controller devices (i.e.,
    /dev/nvme0) behind on hosts. Now NVMe-oF subsystems are disconnected
    when disconnecting volumes if the subsystem doesn't have additional
    volumes present on the host.

os_brick-6.11.0/releasenotes/notes/nvmeof-faster-create-hostnqn-81a63844142858bf.yaml
---
features:
  - |
    NVMe-oF connector: Improved the speed of creation of the
    ``/etc/nvme/hostnqn`` file.
  - |
    NVMe-oF connector: Always write the same value on the same system for
    the ``/etc/nvme/hostnqn`` file on older nvme-cli versions when the
    system UUID can be read from DMI.

os_brick-6.11.0/releasenotes/notes/nvmeof-findmnt-args-b2c966af83bd3bf3.yaml
---
fixes:
  - |
    NVMe-oF connector bug #2026257: Fixed ``_get_host_uuid`` for nvmeof
    failing when using a btrfs root fs.

os_brick-6.11.0/releasenotes/notes/nvmeof-hide-traceback-a968ab71352684e3.yaml
---
fixes:
  - |
    NVMe-oF connector bug #1964389: Fixed showing a misleading traceback.

os_brick-6.11.0/releasenotes/notes/nvmeof-multiple-volumes-within-subsystem-support-05879c1c3bdf52c9.yaml
---
features:
  - |
    NVMe-oF connector: Added support for storage systems presenting
    multiple volumes within one NVMe subsystem.

os_brick-6.11.0/releasenotes/notes/nvmeof-new-address-56044523cf8fc203.yaml
---
fixes:
  - |
    NVMe-oF connector bug #2035811: Fixed attaching volumes on systems
    using newer NVMe kernel modules that present additional information in
    ``/sys/class/nvme/nvme#/address``.

os_brick-6.11.0/releasenotes/notes/nvmeof-old-shownqn-c8cc2820b9c1418e.yaml
---
fixes:
  - |
    NVMe-oF connector bug #2035606: Fixed generation of the hostnqn file on
    old nvme-cli versions.

os_brick-6.11.0/releasenotes/notes/nvmeof-support-v2-0d3a423c26eee003.yaml
---
fixes:
  - |
    NVMe-oF connector bug #1961222: Fixed support for the newer NVMe CLI
    v2.
os_brick-6.11.0/releasenotes/notes/privsep-logs-9e938e5a2aee042e.yaml
---
features:
  - |
    Support separate privsep logging levels. Services using os-brick can
    now have different log levels for the service itself and for os-brick's
    privsep calls. The service usually uses the default
    "oslo_privsep.daemon" logger name, while os-brick now uses
    "os_brick.privileged".
fixes:
  - |
    Bug #1922052: Fixed missing os-brick DEBUG logs on the nova compute
    service.

os_brick-6.11.0/releasenotes/notes/rbd-disconnect-failure-9efa6932df40271b.yaml
---
fixes:
  - |
    RBD connector bug #1981455: Fixed an AttributeError on disconnect for
    RBD encrypted volumes not using host attachments.

os_brick-6.11.0/releasenotes/notes/rbd-non-openstack-support-28ee093d7d3a700e.yaml
---
features:
  - |
    Add support for RBD non-OpenStack (cinderlib) attach/detach.

os_brick-6.11.0/releasenotes/notes/rbd-windows-support-ef6e8184842409dd.yaml
---
features:
  - |
    RBD volumes can now be attached to Windows hosts and Hyper-V VMs. The
    minimum requirements are Ceph 16 (Pacific) and Windows Server 2016.

os_brick-6.11.0/releasenotes/notes/rbd_check_valid_device-2f50c0639adb8e7c.yaml
---
fixes:
  - |
    Fixed the check_valid_path method in the RBD connector for locally
    attached volumes (bug #1884552).

os_brick-6.11.0/releasenotes/notes/rbd_extend_volume-5bc6adc08f662c5b.yaml
---
fixes:
  - |
    Implemented the extend_volume method for the RBD connector
    (bug #1884554).

os_brick-6.11.0/releasenotes/notes/refactor_iscsi_connect-dfbb24305a954783.yaml
---
fixes:
  - |
    iSCSI connect mechanism refactoring to be faster, more robust, and more
    reliable.

os_brick-6.11.0/releasenotes/notes/refactor_iscsi_disconnect-557f4173bc1ae4ed.yaml
---
features:
  - |
    New parameters on ``disconnect_volume`` named ``force`` and
    ``ignore_errors`` can be used to let os-brick know that data loss is
    secondary to leaving a clean system with no leftover devices. If
    ``force`` is not set, or is set to False, preventing data loss takes
    priority (see the sketch below). Currently only iSCSI implements these
    new parameters.
fixes:
  - |
    iSCSI disconnect refactoring improves reliability, speed, and
    thoroughness, leaving a cleaner system after disconnection.
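A minimal sketch of the ``force``/``ignore_errors`` parameters described in the note above — the connector setup and the empty connection properties are placeholders for the values a service keeps from attach time:

```python
from os_brick.initiator import connector

# Illustrative connector setup; a real service already holds a connector
# instance plus the properties and device info from the original attach.
conn = connector.InitiatorConnector.factory(
    'ISCSI', root_helper='sudo', use_multipath=True)

connection_properties = {}  # placeholder: driver-provided properties
device_info = None          # placeholder: attach-time device info

# Prefer a clean host over preserving unflushed data: keep tearing the
# device down even if individual cleanup steps fail.
conn.disconnect_volume(connection_properties, device_info,
                       force=True, ignore_errors=True)
```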
os_brick-6.11.0/releasenotes/notes/remove-aoe-7a97315a73c7b24f.yaml
---
upgrade:
  - |
    The CORAID driver was removed from Cinder in the Ocata release. The AOE
    protocol connector logic in os-brick is no longer needed and has now
    been removed.

os_brick-6.11.0/releasenotes/notes/remove-bug-1633518-workaround-75c2e26843660696.yaml
---
upgrade:
  - |
    A workaround for Bug #1633518, where mangled passwords were used for
    various encryptors, has been removed. This was first introduced way
    back in the 1.9.0 Ocata-era release and has had more than enough time
    to bed in.

os_brick-6.11.0/releasenotes/notes/remove-disco-0809537ffb8c50eb.yaml
---
upgrade:
  - |
    The ITRI DISCO driver was removed from Cinder in the Stein release. The
    connector logic in os-brick is no longer needed and has now been
    removed.

os_brick-6.11.0/releasenotes/notes/remove-drbd-21872230fcac1138.yaml
---
upgrade:
  - |
    The DRBD driver was removed from Cinder in the Stein release by the
    vendor. The connector logic in os-brick is no longer needed and has now
    been removed.

os_brick-6.11.0/releasenotes/notes/remove-hgst-daa7f07c307974d0.yaml
---
upgrade:
  - |
    The HGST driver was removed from Cinder in the Stein release. The
    connector logic in os-brick is no longer needed and has now been
    removed.

os_brick-6.11.0/releasenotes/notes/remove-hyperscale-468f1b61bf4dadf8.yaml
---
upgrade:
  - |
    The Veritas HyperScale driver was removed from Cinder in the Train
    release. The connector logic in os-brick is no longer needed and has
    now been removed.

os_brick-6.11.0/releasenotes/notes/remove-old-constants-20021f5b30bde890.yaml
---
upgrade:
  - |
    The location of the connector constants was moved in the 1.6.0 release,
    but the old location was kept for backwards compatibility. These legacy
    constants are now being removed, and any out-of-tree code should be
    updated to use the latest location (os_brick.initiator.CONSTANT_NAME).
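A small sketch of the relocation mentioned in the note above, using ``ISCSI`` as a representative constant; the full set of names is defined in ``os_brick.initiator``:

```python
# The legacy pre-1.6.0 import location (kept as a compatibility shim
# until this release) is gone; import from os_brick.initiator instead.
from os_brick import initiator

# Protocol constants such as initiator.ISCSI and initiator.FIBRE_CHANNEL
# live here now.
print(initiator.ISCSI)
```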
os_brick-6.11.0/releasenotes/notes/remove-sheepdog-611257b28bc88934.yaml
---
upgrade:
  - |
    The Sheepdog project is no longer active and its driver has been
    removed from Cinder. The connector and Sheepdog-related handling have
    now been removed from os-brick as well.

os_brick-6.11.0/releasenotes/notes/scaleio-extend-attached-ec44d3a72395882c.yaml
---
features:
  - Added the ability to extend attached ScaleIO volumes.

os_brick-6.11.0/releasenotes/notes/scsi-addressing-modes-7674ea30d4ff4c49.yaml
---
features:
  - |
    iSCSI and FCP: Support for different SCSI addressing modes: SAM, SAM-2,
    and SAM-3 flat addressing. Defaults to SAM/transparent, but Cinder
    drivers can set the key ``addressing_mode`` in the connection
    properties to indicate other addressing modes, using one of the
    constants from ``os_brick.constants.SCSI_ADDRESSING_*`` as the value
    (see the sketch below).

os_brick-6.11.0/releasenotes/notes/start-using-reno-23e8d5f1a30851a1.yaml
---
other:
  - Start using reno to manage release notes.

os_brick-6.11.0/releasenotes/notes/storpool-move-api-and-config-code-in-tree-62f41ec44a8a7b7d.yaml
---
other:
  - |
    A new in-tree implementation for communicating with the StorPool API
    and reading StorPool configuration files. The StorPool backend no
    longer requires the OpenStack nodes to have the Python packages
    ``storpool`` and ``storpool.spopenstack`` installed.

os_brick-6.11.0/releasenotes/notes/unsupported-lvm-versions-cbaeabce3ace1805.yaml
---
upgrade:
  - |
    Starting with this release, we removed checks for LVM features with
    versions < 2.02.115. That includes the following:

    * LV activation support (2.02.91)
    * Thin provisioning (2.02.95)
    * --ignoreactivationskip (2.02.99)
    * lvcreate -l 100%FREE (2.02.115)

    The minimum supported LVM version is now 2.02.115, which was released
    in 2015.

os_brick-6.11.0/releasenotes/notes/update-nvmeof-connector-6260a658c15a9a6e.yaml
---
features:
  - |
    NVMe-oF connector: The NVMe-oF connector was upgraded in the previous
    release to support MDRAID replication, but we forgot to mention it. The
    connector continues to support MDRAID replication in this release.
fixes:
  - |
    Bug #1916264: Fixed a regression in the NVMe-oF connector that
    prevented volume attachments for drivers not using MDRAID replication.
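A minimal sketch of the ``addressing_mode`` hint described in the note above, from a Cinder driver's point of view — a sketch under stated assumptions: the target details are placeholders, the key is assumed to go into the connection ``data`` dict, and ``SCSI_ADDRESSING_SAM2`` stands in for whichever ``os_brick.constants.SCSI_ADDRESSING_*`` constant applies:

```python
from os_brick import constants


def initialize_connection(volume, connector_props):
    # Hypothetical Cinder driver method returning connection info.
    return {
        'driver_volume_type': 'iscsi',
        'data': {
            'target_iqn': 'iqn.2010-10.org.example:volume-1',  # placeholder
            'target_portal': '192.0.2.10:3260',                # placeholder
            'target_lun': 257,
            # Ask os-brick to compute device addresses using SAM-2
            # addressing instead of the default SAM/transparent mode.
            'addressing_mode': constants.SCSI_ADDRESSING_SAM2,
        },
    }
```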
os_brick-6.11.0/releasenotes/notes/ussuri-release-979d709dfa7df068.yaml
---
other:
  - |
    This release contains some minor driver fixes.
  - |
    Please keep in mind that the minimum version of Python supported by
    this release is Python 3.6.

os_brick-6.11.0/releasenotes/notes/veritas-hyperscale-connector-fe56cec68b1947cd.yaml
---
features:
  - Add Veritas HyperScale connector support.

os_brick-6.11.0/releasenotes/notes/vmware-vmdk-connector-19e6999e6cae43cd.yaml
---
features:
  - Added the initiator connector 'VmdkConnector' to support backup and
    restore of vmdk volumes by the Cinder backup service.

os_brick-6.11.0/releasenotes/notes/wait-mpath-io-703605e74ee009ef.yaml
---
fixes:
  - |
    Bug #2067949: Fixed an issue where we tried to write to a multipath
    device and failed because it was not yet ready for I/O. Now we wait
    until the I/O is likely to succeed. We introduced two new config
    options to make the wait time configurable (see the sketch below):

    * ``wait_mpath_device_attempts``
    * ``wait_mpath_device_interval``

    These options default to 4 attempts and a 1 second interval,
    respectively. See the help text of the config options for more
    information.

os_brick-6.11.0/releasenotes/notes/yoga-known-issues-f1248af0e328d63e.yaml
---
prelude: >
    Welcome to the Yoga release of the os-brick library.
issues:
  - |
    At release time, we became aware that recent changes to the nvmeof
    connector were tested only in configurations where the NVMe storage
    backend supports namespace AER. One issue arising from this is being
    tracked by Bug #1961102.

os_brick-6.11.0/releasenotes/source/

os_brick-6.11.0/releasenotes/source/2023.1.rst
===========================
2023.1 Series Release Notes
===========================

.. release-notes::
   :branch: 2023.1-eom

os_brick-6.11.0/releasenotes/source/2023.2.rst
===========================
2023.2 Series Release Notes
===========================

.. release-notes::
   :branch: stable/2023.2
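An illustrative mirror of the two multipath wait options named in the wait-mpath-io note above — the option names and defaults come from the release note, while the group (assumed to be ``[os_brick]``, like ``lock_path``) and the help strings are this sketch's own:

```python
from oslo_config import cfg

# Not os-brick's actual registration code; just a self-contained mirror
# showing how the documented names and defaults fit oslo.config.
wait_opts = [
    cfg.IntOpt('wait_mpath_device_attempts', default=4,
               help='Number of attempts to wait for the multipath device '
                    'to be ready for I/O.'),
    cfg.IntOpt('wait_mpath_device_interval', default=1,
               help='Seconds to sleep between attempts.'),
]

CONF = cfg.CONF
CONF.register_opts(wait_opts, group='os_brick')

# A deployer would then tune the wait in the service's config file:
#   [os_brick]
#   wait_mpath_device_attempts = 6
#   wait_mpath_device_interval = 2
```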
os_brick-6.11.0/releasenotes/source/2024.1.rst
===========================
2024.1 Series Release Notes
===========================

.. release-notes::
   :branch: stable/2024.1

os_brick-6.11.0/releasenotes/source/2024.2.rst
===========================
2024.2 Series Release Notes
===========================

.. release-notes::
   :branch: stable/2024.2

os_brick-6.11.0/releasenotes/source/_static/
os_brick-6.11.0/releasenotes/source/_static/.placeholder (empty)
os_brick-6.11.0/releasenotes/source/_templates/
os_brick-6.11.0/releasenotes/source/_templates/.placeholder (empty)

os_brick-6.11.0/releasenotes/source/conf.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# os-brick Release Notes documentation build configuration file
#
# Refer to the Sphinx documentation for advice on configuring this file:
#
#   http://www.sphinx-doc.org/en/stable/config.html

# -- General configuration ------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'reno.sphinxext',
    'openstackdocstheme',
]

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
copyright = '2015, Cinder Developers'

# Release notes are unversioned, so we don't need to set version and release
version = ''
release = ''

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'

# -- Options for openstackdocstheme ----------------------------------------
openstackdocs_repo_name = 'openstack/os-brick'
openstackdocs_bug_project = 'os-brick'
openstackdocs_bug_tag = ''

os_brick-6.11.0/releasenotes/source/index.rst
========================
os-brick Release Notes
========================

.. toctree::
   :maxdepth: 1

   unreleased
   2024.2
   2024.1
   2023.2
   2023.1
   zed
   yoga
   xena
   wallaby
   victoria
   ussuri
   train
   stein
   rocky
   queens
   pike
   ocata
   newton
   mitaka

os_brick-6.11.0/releasenotes/source/mitaka.rst
===========================
Mitaka Series Release Notes
===========================

.. release-notes::
   :branch: origin/stable/mitaka

os_brick-6.11.0/releasenotes/source/newton.rst
=============================
Newton Series Release Notes
=============================

.. release-notes::
   :branch: origin/stable/newton

os_brick-6.11.0/releasenotes/source/ocata.rst
=============================
Ocata Series Release Notes
=============================

.. release-notes::
   :branch: origin/stable/ocata

os_brick-6.11.0/releasenotes/source/pike.rst
===================================
Pike Series Release Notes
===================================

.. release-notes::
   :branch: stable/pike

os_brick-6.11.0/releasenotes/source/queens.rst
===========================
Queens Series Release Notes
===========================

.. release-notes::
   :branch: stable/queens

os_brick-6.11.0/releasenotes/source/rocky.rst
===================================
Rocky Series Release Notes
===================================

.. release-notes::
   :branch: stable/rocky

os_brick-6.11.0/releasenotes/source/stein.rst
==========================
Stein Series Release Notes
==========================
.. release-notes::
   :branch: stable/stein

os_brick-6.11.0/releasenotes/source/train.rst
============================
Train Series Release Notes
============================

.. release-notes::
   :branch: stable/train

os_brick-6.11.0/releasenotes/source/unreleased.rst
==============================
Current Series Release Notes
==============================

.. release-notes::

os_brick-6.11.0/releasenotes/source/ussuri.rst
===========================
Ussuri Series Release Notes
===========================

.. release-notes::
   :branch: stable/ussuri

os_brick-6.11.0/releasenotes/source/victoria.rst
=============================
Victoria Series Release Notes
=============================

.. release-notes::
   :branch: victoria-eom

os_brick-6.11.0/releasenotes/source/wallaby.rst
============================
Wallaby Series Release Notes
============================

.. release-notes::
   :branch: wallaby-eom

os_brick-6.11.0/releasenotes/source/xena.rst
=========================
Xena Series Release Notes
=========================

.. release-notes::
   :branch: xena-eom

os_brick-6.11.0/releasenotes/source/yoga.rst
=========================
Yoga Series Release Notes
=========================

.. release-notes::
   :branch: yoga-eom

os_brick-6.11.0/releasenotes/source/zed.rst
========================
Zed Series Release Notes
========================

.. release-notes::
   :branch: zed-eom

os_brick-6.11.0/requirements.txt
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr>=5.8.0 # Apache-2.0
oslo.concurrency>=5.0.0 # Apache-2.0
oslo.config>=9.0.0 # Apache-2.0
oslo.context>=4.1.0 # Apache-2.0
oslo.log>=4.8.0 # Apache-2.0
oslo.i18n>=5.1.0 # Apache-2.0
oslo.privsep>=3.0.0 # Apache-2.0
oslo.serialization>=4.3.0 # Apache-2.0
oslo.service>=2.8.0 # Apache-2.0
oslo.utils>=6.0.0 # Apache-2.0
requests>=2.25.1 # Apache-2.0
tenacity>=6.3.1 # Apache-2.0
os-win>=5.7.0 # Apache-2.0
psutil>=5.7.2 # BSD

os_brick-6.11.0/setup.cfg
[metadata]
name = os-brick
description = OpenStack Cinder brick library for managing local volume attaches
long_description = file: README.rst
author = OpenStack
author_email = openstack-discuss@lists.openstack.org
url = https://docs.openstack.org/os-brick/
python_requires = >=3.9
classifiers =
    Environment :: OpenStack
    Intended Audience :: Information Technology
    Intended Audience :: System Administrators
    License :: OSI Approved :: Apache Software License
    Operating System :: POSIX :: Linux
    Programming Language :: Python
    Programming Language :: Python :: Implementation :: CPython
    Programming Language :: Python :: 3 :: Only
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.9
    Programming Language :: Python :: 3.10
    Programming Language :: Python :: 3.11
    Programming Language :: Python :: 3.12

[files]
packages =
    os_brick
data_files =
    etc/ = etc/*

[entry_points]
oslo.config.opts =
    os_brick = os_brick.opts:list_opts
oslo.config.opts.defaults =
    os_brick = os_brick.opts:set_defaults

[mypy]
show_column_numbers = true
show_error_context = true
ignore_missing_imports = true
follow_imports = skip
incremental = true
check_untyped_defs = true
warn_unused_ignores = true
show_error_codes = true
pretty = true
html_report = mypy-report
no_implicit_optional = true

[options]
packages = os_brick

[egg_info]
tag_build =
tag_date = 0

os_brick-6.11.0/setup.py
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import setuptools

setuptools.setup(
    setup_requires=['pbr>=2.0.0'],
    pbr=True)
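The ``oslo.config.opts`` entry point declared in setup.cfg above is how sample-config tooling discovers os-brick's options; a minimal sketch of calling the same hook directly, assuming ``list_opts`` follows the usual oslo contract of returning ``(group, options)`` pairs:

```python
from os_brick import opts

# Iterate over the option groups os-brick exposes for config generation.
for group, options in opts.list_opts():
    print(group)
    for opt in options:
        print('  %s (default: %s)' % (opt.name, opt.default))
```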
os_brick-6.11.0/test-requirements.txt
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
hacking>=7.0.0,<7.1.0 # Apache-2.0
flake8-import-order # LGPLv3
flake8-logging-format>=0.6.0 # Apache-2.0
coverage>=5.5 # Apache-2.0
ddt>=1.4.1 # MIT
oslotest>=4.5.0 # Apache-2.0
testscenarios>=0.5.0 # Apache-2.0/BSD
testtools>=2.4.0 # MIT
stestr>=3.2.1 # Apache-2.0
oslo.vmware>=4.0.0 # Apache-2.0
castellan>=3.10.0 # Apache-2.0
doc8>=0.8.1 # Apache-2.0
fixtures>=3.0.0 # Apache-2.0/BSD
bandit>=1.7.0,<1.8.0 # Apache-2.0
mypy>=1.2.0 # MIT
eventlet>=0.30.1,!=0.32.0 # MIT

os_brick-6.11.0/tools/

os_brick-6.11.0/tools/coding-checks.sh
#!/bin/bash
set -eu

usage() {
    echo "Usage: $0 [OPTION]..."
    echo "Run Cinder's coding check(s)"
    echo ""
    echo "  -Y, --pylint [<basecommit>]  Run pylint check on the entire os-brick module or just files changed in basecommit (e.g. HEAD~1)"
    echo "  -h, --help                   Print this usage message"
    echo
    exit 0
}

process_options() {
    i=1
    while [ $i -le $# ]; do
        eval opt=\$$i
        case $opt in
            -h|--help) usage;;
            -Y|--pylint) pylint=1;;
            *) scriptargs="$scriptargs $opt"
        esac
        i=$((i+1))
    done
}

run_pylint() {
    local target="${scriptargs:-HEAD~1}"
    if [[ "$target" = *"all"* ]]; then
        files="os_brick"
    else
        files=$(git diff --name-only --diff-filter=ACMRU $target -- "*.py")
    fi
    if [ -n "${files}" ]; then
        echo "Running pylint against:"
        printf "\t%s\n" "${files[@]}"
        pylint --rcfile=.pylintrc --output-format=colorized ${files} -E -j 0
    else
        echo "No python changes in this commit, pylint check not required."
        exit 0
    fi
}

scriptargs=
pylint=1

process_options $@

if [ $pylint -eq 1 ]; then
    run_pylint
    exit 0
fi

os_brick-6.11.0/tools/fast8.sh
#!/bin/bash

NUM_COMMITS=${FAST8_NUM_COMMITS:-1}

cd $(dirname "$0")/..
CHANGED=""
CHANGED+="$(git diff --name-only HEAD~${NUM_COMMITS} \*.py | tr '\n' ' ')"
while [[ -z $CHANGED ]]; do
    # Search back until we find a commit containing python files
    NUM_COMMITS=$((NUM_COMMITS + 1))
    CHANGED+="$(git diff --name-only HEAD~${NUM_COMMITS} \*.py | tr '\n' ' ')"
done

# Skip files that don't exist
# (have been git rm'd)
CHECK=""
for FILE in $CHANGED; do
    if [ -f "$FILE" ]; then
        CHECK+="$FILE "
    fi
done

flake8 $CHECK

os_brick-6.11.0/tools/generate_connector_list.py
#! /usr/bin/env python3
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Generate list of os-brick connectors"""

import argparse
import inspect
import json
import operator
import os
from pydoc import locate
import textwrap

from os_brick.initiator import connector

parser = argparse.ArgumentParser(prog="generate_connector_list")

parser.add_argument("--format", default='str', choices=['str', 'dict'],
                    help="Output format type")

# Keep backwards compatibility with the gate-docs test
# The tests pass ['docs'] on the cmdln, but it's never been used.
parser.add_argument("output_list", default=None, nargs='?')


def _ensure_loaded(connector_list):
    """Loads everything in a given path.

    This will make sure all classes have been loaded and therefore all
    decorators have registered class.

    :param start_path: The starting path to load.
    """
    classes = []
    for conn in connector_list:
        try:
            conn_class = locate(conn)
            classes.append(conn_class)
        except Exception:
            pass
    return classes


def get_connectors():
    """Get a list of all connectors."""
    classes = _ensure_loaded(connector._get_connector_list())
    return [DriverInfo(x) for x in classes]


class DriverInfo(object):
    """Information about Connector implementations."""

    def __init__(self, cls):
        self.cls = cls
        self.desc = cls.__doc__
        self.class_name = cls.__name__
        self.class_fqn = '{}.{}'.format(inspect.getmodule(cls).__name__,
                                        self.class_name)
        self.platform = getattr(cls, 'platform', None)
        self.os_type = getattr(cls, 'os_type', None)

    def __str__(self):
        return self.class_name

    def __repr__(self):
        return self.class_fqn

    def __hash__(self):
        return hash(self.class_fqn)


class Output(object):

    def __init__(self, base_dir, output_list):
        # At this point we don't care what was passed in, just a trigger
        # to write this out to the doc tree for now
        self.connector_file = None
        if output_list:
            self.connector_file = open(
                '%s/doc/source/connectors.rst' % base_dir, 'w+')
            self.connector_file.write('====================\n')
            self.connector_file.write('Available Connectors\n')
            self.connector_file.write('====================\n\n')

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        if self.connector_file:
            self.connector_file.close()

    def write(self, text):
        if self.connector_file:
            self.connector_file.write('%s\n' % text)
        else:
            print(text)


def format_description(desc, output):
    desc = desc or ''
    lines = desc.rstrip('\n').split('\n')
    output.write('* Description: %s' % lines[0])
    output.write('')
    output.write(textwrap.dedent('\n'.join(lines[1:])))


def format_options(connector_options, output):
    if connector_options and len(connector_options) > 0:
        output.write('* Driver Configuration Options:')
        output.write('')
        output.write('.. list-table:: **Driver configuration options**')
        output.write('   :header-rows: 1')
        output.write('   :widths: 14 30')
        output.write('')
        output.write('   * - Name = Default Value')
        output.write('     - (Type) Description')
        sorted_options = sorted(connector_options,
                                key=operator.attrgetter('name'))
        for opt in sorted_options:
            output.write('   * - %s = %s' % (opt.name, opt.default))
            output.write('     - (%s) %s' % (opt.type, opt.help))
        output.write('')


def print_connectors(connectors, config_name, output, section_char='-'):
    for conn in sorted(connectors, key=lambda x: x.class_name):
        conn_name = conn.class_name
        output.write(conn_name)
        output.write(section_char * len(conn_name))
        if conn.platform:
            output.write('* Platform: %s' % conn.platform)
        if conn.os_type:
            output.write('* OS Type: %s' % conn.os_type)
        output.write('* %s=%s' % (config_name, conn.class_fqn))
        format_description(conn.desc, output)
        output.write('')
        output.write('')


def output_str(cinder_root, args):
    with Output(cinder_root, args.output_list) as output:
        output.write('Connectors')
        output.write('==============')
        connectors = get_connectors()
        print_connectors(connectors, 'connector', output, '~')


def collect_connector_info(connector):
    """Build the dictionary that describes this connector."""
    info = {'name': connector.class_name,
            'fqn': connector.class_fqn,
            'description': connector.desc,
            'platform': connector.platform,
            'os_type': connector.os_type,
            }
    return info


def output_dict():
    """Output the results as a JSON dict."""
    connector_list = []
    connectors = get_connectors()
    for conn in connectors:
        connector_list.append(collect_connector_info(conn))
    print(json.dumps(connector_list))


def main():
    tools_dir = os.path.dirname(os.path.abspath(__file__))
    brick_root = os.path.dirname(tools_dir)
    cur_dir = os.getcwd()
    os.chdir(brick_root)
    args = parser.parse_args()
    try:
        if args.format == 'str':
            output_str(brick_root, args)
        elif args.format == 'dict':
            output_dict()
    finally:
        os.chdir(cur_dir)


if __name__ == '__main__':
    main()

os_brick-6.11.0/tools/lintstack.py
#!/usr/bin/env python3

# Copyright (c) 2013, AT&T Labs, Yun Mao
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""pylint error checking."""

from io import StringIO
import json
import re
import sys

from pylint import lint
from pylint.reporters import text

ignore_codes = [
    # Note(maoy): E1103 is error code related to partial type inference
    "E1103"
]

ignore_messages = [
    # Note(fengqian): this message is the pattern of [E0611].
    # It should be ignored because use six module to keep py3.X
    # compatibility.
    "No name 'urllib' in module '_MovedItems'",

    # Note(xyang): these error messages are for the code [E1101].
    # They should be ignored because 'sha256' and 'sha224' are functions in
    # 'hashlib'.
    "Module 'hashlib' has no 'sha256' member",
    "Module 'hashlib' has no 'sha224' member",
]

ignore_modules = ["os_brick/tests/", "tools/lintstack.head.py"]

KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions"


class LintOutput(object):

    _cached_filename = None
    _cached_content = None

    def __init__(self, filename, lineno, line_content, code, message,
                 lintoutput):
        self.filename = filename
        self.lineno = lineno
        self.line_content = line_content
        self.code = code
        self.message = message
        self.lintoutput = lintoutput

    @classmethod
    def from_line(cls, line):
        m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] (.*)", line)
        matched = m.groups()
        filename, lineno, code, message = (matched[0], int(matched[1]),
                                           matched[2], matched[-1])
        if cls._cached_filename != filename:
            with open(filename) as f:
                cls._cached_content = list(f.readlines())
                cls._cached_filename = filename
        line_content = cls._cached_content[lineno - 1].rstrip()
        return cls(filename, lineno, line_content, code, message,
                   line.rstrip())

    @classmethod
    def from_msg_to_dict(cls, msg):
        """Converts pylint message to a unique-error dictionary.

        From the output of pylint msg, to a dict, where each key
        is a unique error identifier, value is a list of LintOutput
        """
        result = {}
        for line in msg.splitlines():
            obj = cls.from_line(line)
            if obj.is_ignored():
                continue
            key = obj.key()
            if key not in result:
                result[key] = []
            result[key].append(obj)
        return result

    def is_ignored(self):
        if self.code in ignore_codes:
            return True
        if any(self.filename.startswith(name) for name in ignore_modules):
            return True
        return False

    def key(self):
        if self.code in ["E1101", "E1103"]:
            # These two types of errors are like Foo class has no member bar.
            # We discard the source code so that the error will be ignored
            # next time another Foo.bar is encountered.
            return self.message, ""
        return self.message, self.line_content.strip()

    def json(self):
        return json.dumps(self.__dict__)

    def review_str(self):
        return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n"
                "%(code)s: %(message)s" %
                {'filename': self.filename,
                 'lineno': self.lineno,
                 'line_content': self.line_content,
                 'code': self.code,
                 'message': self.message})


class ErrorKeys(object):

    @classmethod
    def print_json(cls, errors, output=sys.stdout):
        print("# automatically generated by tools/lintstack.py", file=output)
        for i in sorted(errors.keys()):
            print(json.dumps(i), file=output)

    @classmethod
    def from_file(cls, filename):
        keys = set()
        for line in open(filename):
            if line and line[0] != "#":
                d = json.loads(line)
                keys.add(tuple(d))
        return keys


def run_pylint():
    buff = StringIO()
    reporter = text.ParseableTextReporter(output=buff)
    args = ["--include-ids=y", "-E", "os_brick"]
    lint.Run(args, reporter=reporter, exit=False)
    val = buff.getvalue()
    buff.close()
    return val


def generate_error_keys(msg=None):
    print("Generating", KNOWN_PYLINT_EXCEPTIONS_FILE)
    if msg is None:
        msg = run_pylint()
    errors = LintOutput.from_msg_to_dict(msg)
    with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f:
        ErrorKeys.print_json(errors, output=f)


def validate(newmsg=None):
    print("Loading", KNOWN_PYLINT_EXCEPTIONS_FILE)
    known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE)
    if newmsg is None:
        print("Running pylint. Be patient...")
        newmsg = run_pylint()
    errors = LintOutput.from_msg_to_dict(newmsg)

    print("Unique errors reported by pylint: was %d, now %d."
          % (len(known), len(errors)))
    passed = True
    for err_key, err_list in errors.items():
        for err in err_list:
            if err_key not in known:
                print(err.lintoutput)
                print()
                passed = False
    if passed:
        print("Congrats! pylint check passed.")
        redundant = known - set(errors.keys())
        if redundant:
            print("Extra credit: some known pylint exceptions disappeared.")
            for i in sorted(redundant):
                print(json.dumps(i))
            print("Consider regenerating the exception file if you will.")
    else:
        print("Please fix the errors above. If you believe they are false "
              "positives, run 'tools/lintstack.py generate' to overwrite.")
        sys.exit(1)


def usage():
    print("""Usage: tools/lintstack.py [generate|validate]
    To generate pylint_exceptions file: tools/lintstack.py generate
    To validate the current commit: tools/lintstack.py
    """)


def main():
    option = "validate"
    if len(sys.argv) > 1:
        option = sys.argv[1]
    if option == "generate":
        generate_error_keys()
    elif option == "validate":
        validate()
    else:
        usage()


if __name__ == "__main__":
    main()

os_brick-6.11.0/tools/lintstack.sh
#!/usr/bin/env bash

# Copyright (c) 2012-2013, AT&T Labs, Yun Mao
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Use lintstack.py to compare pylint errors.
# We run pylint twice, once on HEAD, once on the code before the latest
# commit for review.
set -e

TOOLS_DIR=$(cd $(dirname "$0") && pwd)
# Get the current branch name.
GITHEAD=`git rev-parse --abbrev-ref HEAD`
if [[ "$GITHEAD" == "HEAD" ]]; then
    # In detached head mode, get revision number instead
    GITHEAD=`git rev-parse HEAD`
    echo "Currently we are at commit $GITHEAD"
else
    echo "Currently we are at branch $GITHEAD"
fi

cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py

if git rev-parse HEAD^2 2>/dev/null; then
    # The HEAD is a Merge commit. Here, the patch to review is
    # HEAD^2, the master branch is at HEAD^1, and the patch was
    # written based on HEAD^2~1.
    PREV_COMMIT=`git rev-parse HEAD^2~1`
    git checkout HEAD~1
    # The git merge is necessary for reviews with a series of patches.
    # If not, this is a no-op so won't hurt either.
    git merge $PREV_COMMIT
else
    # The HEAD is not a merge commit. This won't happen on gerrit.
    # Most likely you are running against your own patch locally.
    # We assume the patch to examine is HEAD, and we compare it against
    # HEAD~1
    git checkout HEAD~1
fi

# First generate tools/pylint_exceptions from HEAD~1
$TOOLS_DIR/lintstack.head.py generate
# Then use that as a reference to compare against HEAD
git checkout $GITHEAD
$TOOLS_DIR/lintstack.head.py

echo "Check passed. FYI: the pylint exceptions are:"
cat $TOOLS_DIR/pylint_exceptions

os_brick-6.11.0/tools/mypywrap.sh
#!/bin/sh
#
# A wrapper around mypy that allows us to specify what files to run 'mypy'
# type checks on.
# Intended to be invoked via tox:
#
#   tox -e mypy
#
# Eventually this should go away once we have either converted everything or
# converted enough and ignored [1] the rest.
#
# [1] http://mypy.readthedocs.io/en/latest/config_file.html#per-module-flags

ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

#export MYPYPATH=$ROOT_DIR/../os_brick/tests/stubs/

python -m mypy -V

if [ $# -eq 0 ]; then
    # if no arguments provided, use the standard converted lists
    lines=$(grep -v '#' $ROOT_DIR/../mypy-files.txt)
    python -m mypy $OS_MYPY_OPTS ${lines[@]}
else
    # else test what the user asked us to
    python -m mypy $OS_MYPY_OPTS $@
fi

os_brick-6.11.0/tox.ini
[tox]
minversion = 4.0.0
# specify virtualenv here to keep local runs consistent with the
# gate (it sets the versions of pip, setuptools, and wheel)
requires = virtualenv>=20.17.1
# this allows tox to infer the base python from the environment name
# and override any basepython configured in this file
ignore_basepython_conflict=true
envlist = py3,pep8

[testenv]
basepython = python3
usedevelop = True
setenv =
  VIRTUAL_ENV={envdir}
  OS_TEST_PATH=./os_brick/tests
  OS_TEST_TIMEOUT=60
  OS_STDOUT_CAPTURE=1
  OS_STDERR_CAPTURE=1
deps =
  -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
  -r{toxinidir}/requirements.txt
  -r{toxinidir}/test-requirements.txt
# By default stestr will set concurrency
# to ncpu, to specify something else use
# the concurrency=<n> option.
# call example: 'tox -epy37 -- --concurrency=4'
commands =
  stestr run {posargs}
  stestr slowest
allowlist_externals =
  bash
  find
passenv =
  http_proxy
  HTTP_PROXY
  https_proxy
  HTTPS_PROXY
  no_proxy
  NO_PROXY

[testenv:debug]
commands =
  find . -type f -name "*.pyc" -delete
  oslo_debug_helper {posargs}

[testenv:pep8]
commands =
  flake8 {posargs} .
  doc8

[testenv:fast8]
allowlist_externals = {toxinidir}/tools/fast8.sh
commands =
  {toxinidir}/tools/fast8.sh

[testenv:bandit]
deps = -r{toxinidir}/test-requirements.txt
# B101: skip assert used checks, they are validly used for mypy
commands: bandit -r os_brick -x os_brick/tests -n5 -sB101

[testenv:pylint]
allowlist_externals = {toxinidir}/tools/coding-checks.sh
deps =
  -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
  -r{toxinidir}/requirements.txt
  pylint==2.17.3
commands =
  {toxinidir}/tools/coding-checks.sh --pylint {posargs:all}

[testenv:venv]
commands = {posargs}

[testenv:cover]
# To see the report of missing coverage add to commands
# coverage report --show-missing
setenv =
  {[testenv]setenv}
  PYTHON=coverage run --source os_brick --parallel-mode
commands =
  stestr run {posargs}
  coverage combine
  coverage html -d cover
  coverage xml -o cover/coverage/xml

[testenv:docs]
deps =
  -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
  -r{toxinidir}/doc/requirements.txt
commands =
  rm -fr doc/build doc/source/contributor/api/ .autogenerated
  sphinx-build -W -b html -d doc/build/doctrees doc/source doc/build/html
allowlist_externals = rm

[testenv:pdf-docs]
deps = {[testenv:docs]deps}
commands =
  {[testenv:docs]commands}
  sphinx-build -W -b latex doc/source doc/build/pdf
  make -C doc/build/pdf
allowlist_externals =
  {[testenv:docs]allowlist_externals}
  make

[testenv:releasenotes]
deps = {[testenv:docs]deps}
commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html

[flake8]
# Following checks are ignored on purpose.
#
# E251 unexpected spaces around keyword / parameter equals
# reason: no improvement in readability
# W503 line break before binary operator
# reason: pep8 itself is not sure about this one and
#         reversed this rule in 2016
# W504 line break after binary operator
# reason: no agreement on this being universally
#         preferable for our code. Disabled to keep checking
#         tools from getting in our way with regards to this.
# H101 include name with TODO
# reason: no real benefit
# G200 Logging statements should not include the exception
# reason: Many existing cases of this that may be legitimate
#
show-source = True
ignore = E251,W503,W504,H101,G200
enable-extensions=H106,H203,H204,H205
builtins = _
exclude=.venv,.git,.tox,dist,*lib/python*,*egg,build
max-complexity=30
application-import-names = os_brick
import-order-style = pep8

[doc8]
ignore-path=.tox,*.egg-info,doc/src/api,doc/source/drivers.rst,doc/build,.eggs/*/EGG-INFO/*.txt,doc/source/configuration/tables,./*.txt,releasenotes/build
extension=.txt,.rst,.inc

[hacking]
import_exceptions = os_brick.i18n, typing

[testenv:bindep]
# Do not install any requirements. We want this to be fast and work even if
# system dependencies are missing, since it's used to tell you what system
# dependencies are missing! This also means that bindep must be installed
# separately, outside of the requirements files, and develop mode disabled
# explicitly to avoid unnecessarily installing the checked-out repo too
skip_install = True
deps = bindep
commands = bindep {posargs}
usedevelop = False

[testenv:mypy]
description =
  Run type checks.
setenv =
  OS_MYPY_OPTS=--install-types --non-interactive
commands =
  bash tools/mypywrap.sh {posargs}